From 62d9e475065597bfd9c3a646af4c4faab637a599 Mon Sep 17 00:00:00 2001 From: Danny Tsen Date: Wed, 26 Apr 2023 15:11:43 -0400 Subject: [PATCH 001/149] crypto: chacha20-p10 - An optimized Chacha20 implementation with 8-way unrolling for ppc64le Improve overall performance of chacha20 encrypt and decrypt operations for Power10 or later CPU. Signed-off-by: Danny Tsen Signed-off-by: Herbert Xu --- arch/powerpc/crypto/chacha-p10le-8x.S | 842 ++++++++++++++++++++++++++ 1 file changed, 842 insertions(+) create mode 100644 arch/powerpc/crypto/chacha-p10le-8x.S diff --git a/arch/powerpc/crypto/chacha-p10le-8x.S b/arch/powerpc/crypto/chacha-p10le-8x.S new file mode 100644 index 000000000000..17bedb66b822 --- /dev/null +++ b/arch/powerpc/crypto/chacha-p10le-8x.S @@ -0,0 +1,842 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +# +# Accelerated chacha20 implementation for ppc64le. +# +# Copyright 2023- IBM Corp. All rights reserved +# +#=================================================================================== +# Written by Danny Tsen +# +# chacha_p10le_8x(u32 *state, byte *dst, const byte *src, +# size_t len, int nrounds); +# +# do rounds, 8 quarter rounds +# 1. a += b; d ^= a; d <<<= 16; +# 2. c += d; b ^= c; b <<<= 12; +# 3. a += b; d ^= a; d <<<= 8; +# 4. c += d; b ^= c; b <<<= 7 +# +# row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 16 +# row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 12 +# row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 8 +# row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 7 +# +# 4 blocks (a b c d) +# +# a0 b0 c0 d0 +# a1 b1 c1 d1 +# ... +# a4 b4 c4 d4 +# ... +# a8 b8 c8 d8 +# ... +# a12 b12 c12 d12 +# a13 ... +# a14 ... +# a15 b15 c15 d15 +# +# Column round (v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15) +# Diagnal round (v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14) +# + +#include +#include +#include +#include + +.machine "any" +.text + +.macro SAVE_GPR GPR OFFSET FRAME + std \GPR,\OFFSET(\FRAME) +.endm + +.macro SAVE_VRS VRS OFFSET FRAME + li 16, \OFFSET + stvx \VRS, 16, \FRAME +.endm + +.macro SAVE_VSX VSX OFFSET FRAME + li 16, \OFFSET + stxvx \VSX, 16, \FRAME +.endm + +.macro RESTORE_GPR GPR OFFSET FRAME + ld \GPR,\OFFSET(\FRAME) +.endm + +.macro RESTORE_VRS VRS OFFSET FRAME + li 16, \OFFSET + lvx \VRS, 16, \FRAME +.endm + +.macro RESTORE_VSX VSX OFFSET FRAME + li 16, \OFFSET + lxvx \VSX, 16, \FRAME +.endm + +.macro SAVE_REGS + mflr 0 + std 0, 16(1) + stdu 1,-752(1) + + SAVE_GPR 14, 112, 1 + SAVE_GPR 15, 120, 1 + SAVE_GPR 16, 128, 1 + SAVE_GPR 17, 136, 1 + SAVE_GPR 18, 144, 1 + SAVE_GPR 19, 152, 1 + SAVE_GPR 20, 160, 1 + SAVE_GPR 21, 168, 1 + SAVE_GPR 22, 176, 1 + SAVE_GPR 23, 184, 1 + SAVE_GPR 24, 192, 1 + SAVE_GPR 25, 200, 1 + SAVE_GPR 26, 208, 1 + SAVE_GPR 27, 216, 1 + SAVE_GPR 28, 224, 1 + SAVE_GPR 29, 232, 1 + SAVE_GPR 30, 240, 1 + SAVE_GPR 31, 248, 1 + + addi 9, 1, 256 + SAVE_VRS 20, 0, 9 + SAVE_VRS 21, 16, 9 + SAVE_VRS 22, 32, 9 + SAVE_VRS 23, 48, 9 + SAVE_VRS 24, 64, 9 + SAVE_VRS 25, 80, 9 + SAVE_VRS 26, 96, 9 + SAVE_VRS 27, 112, 9 + SAVE_VRS 28, 128, 9 + SAVE_VRS 29, 144, 9 + SAVE_VRS 30, 160, 9 + SAVE_VRS 31, 176, 9 + + SAVE_VSX 14, 192, 9 + SAVE_VSX 15, 208, 9 + SAVE_VSX 16, 224, 9 + SAVE_VSX 17, 240, 9 + SAVE_VSX 18, 256, 9 + SAVE_VSX 19, 272, 9 + SAVE_VSX 20, 288, 9 + SAVE_VSX 21, 304, 9 + SAVE_VSX 22, 320, 9 + SAVE_VSX 23, 336, 9 + SAVE_VSX 24, 352, 9 + SAVE_VSX 25, 368, 9 + SAVE_VSX 26, 384, 9 + SAVE_VSX 27, 400, 9 + 
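+	# Stack frame laid out by SAVE_REGS (752 bytes, offsets from r1):
+	#   112-248 : nonvolatile GPRs r14-r31
+	#   256-447 : nonvolatile VRs  v20-v31 (indexed from r9 = r1 + 256)
+	#   448-735 : VSRs vs14-vs31, which alias the nonvolatile FPRs f14-f31
+	#             and are used below as extra constant/state storage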
SAVE_VSX 28, 416, 9 + SAVE_VSX 29, 432, 9 + SAVE_VSX 30, 448, 9 + SAVE_VSX 31, 464, 9 +.endm # SAVE_REGS + +.macro RESTORE_REGS + addi 9, 1, 256 + RESTORE_VRS 20, 0, 9 + RESTORE_VRS 21, 16, 9 + RESTORE_VRS 22, 32, 9 + RESTORE_VRS 23, 48, 9 + RESTORE_VRS 24, 64, 9 + RESTORE_VRS 25, 80, 9 + RESTORE_VRS 26, 96, 9 + RESTORE_VRS 27, 112, 9 + RESTORE_VRS 28, 128, 9 + RESTORE_VRS 29, 144, 9 + RESTORE_VRS 30, 160, 9 + RESTORE_VRS 31, 176, 9 + + RESTORE_VSX 14, 192, 9 + RESTORE_VSX 15, 208, 9 + RESTORE_VSX 16, 224, 9 + RESTORE_VSX 17, 240, 9 + RESTORE_VSX 18, 256, 9 + RESTORE_VSX 19, 272, 9 + RESTORE_VSX 20, 288, 9 + RESTORE_VSX 21, 304, 9 + RESTORE_VSX 22, 320, 9 + RESTORE_VSX 23, 336, 9 + RESTORE_VSX 24, 352, 9 + RESTORE_VSX 25, 368, 9 + RESTORE_VSX 26, 384, 9 + RESTORE_VSX 27, 400, 9 + RESTORE_VSX 28, 416, 9 + RESTORE_VSX 29, 432, 9 + RESTORE_VSX 30, 448, 9 + RESTORE_VSX 31, 464, 9 + + RESTORE_GPR 14, 112, 1 + RESTORE_GPR 15, 120, 1 + RESTORE_GPR 16, 128, 1 + RESTORE_GPR 17, 136, 1 + RESTORE_GPR 18, 144, 1 + RESTORE_GPR 19, 152, 1 + RESTORE_GPR 20, 160, 1 + RESTORE_GPR 21, 168, 1 + RESTORE_GPR 22, 176, 1 + RESTORE_GPR 23, 184, 1 + RESTORE_GPR 24, 192, 1 + RESTORE_GPR 25, 200, 1 + RESTORE_GPR 26, 208, 1 + RESTORE_GPR 27, 216, 1 + RESTORE_GPR 28, 224, 1 + RESTORE_GPR 29, 232, 1 + RESTORE_GPR 30, 240, 1 + RESTORE_GPR 31, 248, 1 + + addi 1, 1, 752 + ld 0, 16(1) + mtlr 0 +.endm # RESTORE_REGS + +.macro QT_loop_8x + # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15) + xxlor 0, 32+25, 32+25 + xxlor 32+25, 20, 20 + vadduwm 0, 0, 4 + vadduwm 1, 1, 5 + vadduwm 2, 2, 6 + vadduwm 3, 3, 7 + vadduwm 16, 16, 20 + vadduwm 17, 17, 21 + vadduwm 18, 18, 22 + vadduwm 19, 19, 23 + + vpermxor 12, 12, 0, 25 + vpermxor 13, 13, 1, 25 + vpermxor 14, 14, 2, 25 + vpermxor 15, 15, 3, 25 + vpermxor 28, 28, 16, 25 + vpermxor 29, 29, 17, 25 + vpermxor 30, 30, 18, 25 + vpermxor 31, 31, 19, 25 + xxlor 32+25, 0, 0 + vadduwm 8, 8, 12 + vadduwm 9, 9, 13 + vadduwm 10, 10, 14 + vadduwm 11, 11, 15 + vadduwm 24, 24, 28 + vadduwm 25, 25, 29 + vadduwm 26, 26, 30 + vadduwm 27, 27, 31 + vxor 4, 4, 8 + vxor 5, 5, 9 + vxor 6, 6, 10 + vxor 7, 7, 11 + vxor 20, 20, 24 + vxor 21, 21, 25 + vxor 22, 22, 26 + vxor 23, 23, 27 + + xxlor 0, 32+25, 32+25 + xxlor 32+25, 21, 21 + vrlw 4, 4, 25 # + vrlw 5, 5, 25 + vrlw 6, 6, 25 + vrlw 7, 7, 25 + vrlw 20, 20, 25 # + vrlw 21, 21, 25 + vrlw 22, 22, 25 + vrlw 23, 23, 25 + xxlor 32+25, 0, 0 + vadduwm 0, 0, 4 + vadduwm 1, 1, 5 + vadduwm 2, 2, 6 + vadduwm 3, 3, 7 + vadduwm 16, 16, 20 + vadduwm 17, 17, 21 + vadduwm 18, 18, 22 + vadduwm 19, 19, 23 + + xxlor 0, 32+25, 32+25 + xxlor 32+25, 22, 22 + vpermxor 12, 12, 0, 25 + vpermxor 13, 13, 1, 25 + vpermxor 14, 14, 2, 25 + vpermxor 15, 15, 3, 25 + vpermxor 28, 28, 16, 25 + vpermxor 29, 29, 17, 25 + vpermxor 30, 30, 18, 25 + vpermxor 31, 31, 19, 25 + xxlor 32+25, 0, 0 + vadduwm 8, 8, 12 + vadduwm 9, 9, 13 + vadduwm 10, 10, 14 + vadduwm 11, 11, 15 + vadduwm 24, 24, 28 + vadduwm 25, 25, 29 + vadduwm 26, 26, 30 + vadduwm 27, 27, 31 + xxlor 0, 32+28, 32+28 + xxlor 32+28, 23, 23 + vxor 4, 4, 8 + vxor 5, 5, 9 + vxor 6, 6, 10 + vxor 7, 7, 11 + vxor 20, 20, 24 + vxor 21, 21, 25 + vxor 22, 22, 26 + vxor 23, 23, 27 + vrlw 4, 4, 28 # + vrlw 5, 5, 28 + vrlw 6, 6, 28 + vrlw 7, 7, 28 + vrlw 20, 20, 28 # + vrlw 21, 21, 28 + vrlw 22, 22, 28 + vrlw 23, 23, 28 + xxlor 32+28, 0, 0 + + # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14) + xxlor 0, 32+25, 32+25 + xxlor 32+25, 20, 20 + vadduwm 0, 0, 5 + vadduwm 1, 1, 6 + vadduwm 2, 2, 7 + 
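+	# This is the vector form of the scalar quarter round from the file header,
+	#   a += b; d ^= a; d <<<= 16;   c += d; b ^= c; b <<<= 12;
+	#   a += b; d ^= a; d <<<= 8;    c += d; b ^= c; b <<<= 7;
+	# applied lane-wise to eight blocks at once: v0-v15 carry blocks 0-3 and
+	# v16-v31 carry blocks 4-7.  Since all 32 VRs are live, the surrounding
+	# xxlor pairs spill one VR to vs0, borrow it for the rotate/permute
+	# constants staged in vs20-vs23, and then restore it.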
vadduwm 3, 3, 4 + vadduwm 16, 16, 21 + vadduwm 17, 17, 22 + vadduwm 18, 18, 23 + vadduwm 19, 19, 20 + + vpermxor 15, 15, 0, 25 + vpermxor 12, 12, 1, 25 + vpermxor 13, 13, 2, 25 + vpermxor 14, 14, 3, 25 + vpermxor 31, 31, 16, 25 + vpermxor 28, 28, 17, 25 + vpermxor 29, 29, 18, 25 + vpermxor 30, 30, 19, 25 + + xxlor 32+25, 0, 0 + vadduwm 10, 10, 15 + vadduwm 11, 11, 12 + vadduwm 8, 8, 13 + vadduwm 9, 9, 14 + vadduwm 26, 26, 31 + vadduwm 27, 27, 28 + vadduwm 24, 24, 29 + vadduwm 25, 25, 30 + vxor 5, 5, 10 + vxor 6, 6, 11 + vxor 7, 7, 8 + vxor 4, 4, 9 + vxor 21, 21, 26 + vxor 22, 22, 27 + vxor 23, 23, 24 + vxor 20, 20, 25 + + xxlor 0, 32+25, 32+25 + xxlor 32+25, 21, 21 + vrlw 5, 5, 25 + vrlw 6, 6, 25 + vrlw 7, 7, 25 + vrlw 4, 4, 25 + vrlw 21, 21, 25 + vrlw 22, 22, 25 + vrlw 23, 23, 25 + vrlw 20, 20, 25 + xxlor 32+25, 0, 0 + + vadduwm 0, 0, 5 + vadduwm 1, 1, 6 + vadduwm 2, 2, 7 + vadduwm 3, 3, 4 + vadduwm 16, 16, 21 + vadduwm 17, 17, 22 + vadduwm 18, 18, 23 + vadduwm 19, 19, 20 + + xxlor 0, 32+25, 32+25 + xxlor 32+25, 22, 22 + vpermxor 15, 15, 0, 25 + vpermxor 12, 12, 1, 25 + vpermxor 13, 13, 2, 25 + vpermxor 14, 14, 3, 25 + vpermxor 31, 31, 16, 25 + vpermxor 28, 28, 17, 25 + vpermxor 29, 29, 18, 25 + vpermxor 30, 30, 19, 25 + xxlor 32+25, 0, 0 + + vadduwm 10, 10, 15 + vadduwm 11, 11, 12 + vadduwm 8, 8, 13 + vadduwm 9, 9, 14 + vadduwm 26, 26, 31 + vadduwm 27, 27, 28 + vadduwm 24, 24, 29 + vadduwm 25, 25, 30 + + xxlor 0, 32+28, 32+28 + xxlor 32+28, 23, 23 + vxor 5, 5, 10 + vxor 6, 6, 11 + vxor 7, 7, 8 + vxor 4, 4, 9 + vxor 21, 21, 26 + vxor 22, 22, 27 + vxor 23, 23, 24 + vxor 20, 20, 25 + vrlw 5, 5, 28 + vrlw 6, 6, 28 + vrlw 7, 7, 28 + vrlw 4, 4, 28 + vrlw 21, 21, 28 + vrlw 22, 22, 28 + vrlw 23, 23, 28 + vrlw 20, 20, 28 + xxlor 32+28, 0, 0 +.endm + +.macro QT_loop_4x + # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15) + vadduwm 0, 0, 4 + vadduwm 1, 1, 5 + vadduwm 2, 2, 6 + vadduwm 3, 3, 7 + vpermxor 12, 12, 0, 20 + vpermxor 13, 13, 1, 20 + vpermxor 14, 14, 2, 20 + vpermxor 15, 15, 3, 20 + vadduwm 8, 8, 12 + vadduwm 9, 9, 13 + vadduwm 10, 10, 14 + vadduwm 11, 11, 15 + vxor 4, 4, 8 + vxor 5, 5, 9 + vxor 6, 6, 10 + vxor 7, 7, 11 + vrlw 4, 4, 21 + vrlw 5, 5, 21 + vrlw 6, 6, 21 + vrlw 7, 7, 21 + vadduwm 0, 0, 4 + vadduwm 1, 1, 5 + vadduwm 2, 2, 6 + vadduwm 3, 3, 7 + vpermxor 12, 12, 0, 22 + vpermxor 13, 13, 1, 22 + vpermxor 14, 14, 2, 22 + vpermxor 15, 15, 3, 22 + vadduwm 8, 8, 12 + vadduwm 9, 9, 13 + vadduwm 10, 10, 14 + vadduwm 11, 11, 15 + vxor 4, 4, 8 + vxor 5, 5, 9 + vxor 6, 6, 10 + vxor 7, 7, 11 + vrlw 4, 4, 23 + vrlw 5, 5, 23 + vrlw 6, 6, 23 + vrlw 7, 7, 23 + + # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14) + vadduwm 0, 0, 5 + vadduwm 1, 1, 6 + vadduwm 2, 2, 7 + vadduwm 3, 3, 4 + vpermxor 15, 15, 0, 20 + vpermxor 12, 12, 1, 20 + vpermxor 13, 13, 2, 20 + vpermxor 14, 14, 3, 20 + vadduwm 10, 10, 15 + vadduwm 11, 11, 12 + vadduwm 8, 8, 13 + vadduwm 9, 9, 14 + vxor 5, 5, 10 + vxor 6, 6, 11 + vxor 7, 7, 8 + vxor 4, 4, 9 + vrlw 5, 5, 21 + vrlw 6, 6, 21 + vrlw 7, 7, 21 + vrlw 4, 4, 21 + vadduwm 0, 0, 5 + vadduwm 1, 1, 6 + vadduwm 2, 2, 7 + vadduwm 3, 3, 4 + vpermxor 15, 15, 0, 22 + vpermxor 12, 12, 1, 22 + vpermxor 13, 13, 2, 22 + vpermxor 14, 14, 3, 22 + vadduwm 10, 10, 15 + vadduwm 11, 11, 12 + vadduwm 8, 8, 13 + vadduwm 9, 9, 14 + vxor 5, 5, 10 + vxor 6, 6, 11 + vxor 7, 7, 8 + vxor 4, 4, 9 + vrlw 5, 5, 23 + vrlw 6, 6, 23 + vrlw 7, 7, 23 + vrlw 4, 4, 23 +.endm + +# Transpose +.macro TP_4x a0 a1 a2 a3 + xxmrghw 10, 32+\a0, 32+\a1 # a0, a1, b0, b1 
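+	# 4x4 word transpose: the xxmrghw/xxmrglw pairs interleave the word halves
+	# of \a0-\a3 and the xxpermdi steps recombine the doublewords, turning the
+	# word-major layout used in the rounds (one state word of four blocks per
+	# register) into block-major form (four consecutive words of one block per
+	# register) ready for Add_state and Write_256.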
+ xxmrghw 11, 32+\a2, 32+\a3 # a2, a3, b2, b3 + xxmrglw 12, 32+\a0, 32+\a1 # c0, c1, d0, d1 + xxmrglw 13, 32+\a2, 32+\a3 # c2, c3, d2, d3 + xxpermdi 32+\a0, 10, 11, 0 # a0, a1, a2, a3 + xxpermdi 32+\a1, 10, 11, 3 # b0, b1, b2, b3 + xxpermdi 32+\a2, 12, 13, 0 # c0, c1, c2, c3 + xxpermdi 32+\a3, 12, 13, 3 # d0, d1, d2, d3 +.endm + +# key stream = working state + state +.macro Add_state S + vadduwm \S+0, \S+0, 16-\S + vadduwm \S+4, \S+4, 17-\S + vadduwm \S+8, \S+8, 18-\S + vadduwm \S+12, \S+12, 19-\S + + vadduwm \S+1, \S+1, 16-\S + vadduwm \S+5, \S+5, 17-\S + vadduwm \S+9, \S+9, 18-\S + vadduwm \S+13, \S+13, 19-\S + + vadduwm \S+2, \S+2, 16-\S + vadduwm \S+6, \S+6, 17-\S + vadduwm \S+10, \S+10, 18-\S + vadduwm \S+14, \S+14, 19-\S + + vadduwm \S+3, \S+3, 16-\S + vadduwm \S+7, \S+7, 17-\S + vadduwm \S+11, \S+11, 18-\S + vadduwm \S+15, \S+15, 19-\S +.endm + +# +# write 256 bytes +# +.macro Write_256 S + add 9, 14, 5 + add 16, 14, 4 + lxvw4x 0, 0, 9 + lxvw4x 1, 17, 9 + lxvw4x 2, 18, 9 + lxvw4x 3, 19, 9 + lxvw4x 4, 20, 9 + lxvw4x 5, 21, 9 + lxvw4x 6, 22, 9 + lxvw4x 7, 23, 9 + lxvw4x 8, 24, 9 + lxvw4x 9, 25, 9 + lxvw4x 10, 26, 9 + lxvw4x 11, 27, 9 + lxvw4x 12, 28, 9 + lxvw4x 13, 29, 9 + lxvw4x 14, 30, 9 + lxvw4x 15, 31, 9 + + xxlxor \S+32, \S+32, 0 + xxlxor \S+36, \S+36, 1 + xxlxor \S+40, \S+40, 2 + xxlxor \S+44, \S+44, 3 + xxlxor \S+33, \S+33, 4 + xxlxor \S+37, \S+37, 5 + xxlxor \S+41, \S+41, 6 + xxlxor \S+45, \S+45, 7 + xxlxor \S+34, \S+34, 8 + xxlxor \S+38, \S+38, 9 + xxlxor \S+42, \S+42, 10 + xxlxor \S+46, \S+46, 11 + xxlxor \S+35, \S+35, 12 + xxlxor \S+39, \S+39, 13 + xxlxor \S+43, \S+43, 14 + xxlxor \S+47, \S+47, 15 + + stxvw4x \S+32, 0, 16 + stxvw4x \S+36, 17, 16 + stxvw4x \S+40, 18, 16 + stxvw4x \S+44, 19, 16 + + stxvw4x \S+33, 20, 16 + stxvw4x \S+37, 21, 16 + stxvw4x \S+41, 22, 16 + stxvw4x \S+45, 23, 16 + + stxvw4x \S+34, 24, 16 + stxvw4x \S+38, 25, 16 + stxvw4x \S+42, 26, 16 + stxvw4x \S+46, 27, 16 + + stxvw4x \S+35, 28, 16 + stxvw4x \S+39, 29, 16 + stxvw4x \S+43, 30, 16 + stxvw4x \S+47, 31, 16 + +.endm + +# +# chacha20_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds); +# +SYM_FUNC_START(chacha_p10le_8x) +.align 5 + cmpdi 6, 0 + ble Out_no_chacha + + SAVE_REGS + + # r17 - r31 mainly for Write_256 macro. 
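+	# Arguments (ELFv2 ABI): r3 = state, r4 = dst, r5 = src, r6 = len,
+	# r7 = nrounds.  r17-r31 are loaded with the byte offsets 16,32,...,240
+	# used as index registers for the 16-byte lxvw4x/stxvw4x accesses in
+	# Write_256 and for loading the state and permx constants.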
+ li 17, 16 + li 18, 32 + li 19, 48 + li 20, 64 + li 21, 80 + li 22, 96 + li 23, 112 + li 24, 128 + li 25, 144 + li 26, 160 + li 27, 176 + li 28, 192 + li 29, 208 + li 30, 224 + li 31, 240 + + mr 15, 6 # len + li 14, 0 # offset to inp and outp + + lxvw4x 48, 0, 3 # vr16, constants + lxvw4x 49, 17, 3 # vr17, key 1 + lxvw4x 50, 18, 3 # vr18, key 2 + lxvw4x 51, 19, 3 # vr19, counter, nonce + + # create (0, 1, 2, 3) counters + vspltisw 0, 0 + vspltisw 1, 1 + vspltisw 2, 2 + vspltisw 3, 3 + vmrghw 4, 0, 1 + vmrglw 5, 2, 3 + vsldoi 30, 4, 5, 8 # vr30 counter, 4 (0, 1, 2, 3) + + vspltisw 21, 12 + vspltisw 23, 7 + + addis 11, 2, permx@toc@ha + addi 11, 11, permx@toc@l + lxvw4x 32+20, 0, 11 + lxvw4x 32+22, 17, 11 + + sradi 8, 7, 1 + + mtctr 8 + + # save constants to vsx + xxlor 16, 48, 48 + xxlor 17, 49, 49 + xxlor 18, 50, 50 + xxlor 19, 51, 51 + + vspltisw 25, 4 + vspltisw 26, 8 + + xxlor 25, 32+26, 32+26 + xxlor 24, 32+25, 32+25 + + vadduwm 31, 30, 25 # counter = (0, 1, 2, 3) + (4, 4, 4, 4) + xxlor 30, 32+30, 32+30 + xxlor 31, 32+31, 32+31 + + xxlor 20, 32+20, 32+20 + xxlor 21, 32+21, 32+21 + xxlor 22, 32+22, 32+22 + xxlor 23, 32+23, 32+23 + + cmpdi 6, 512 + blt Loop_last + +Loop_8x: + xxspltw 32+0, 16, 0 + xxspltw 32+1, 16, 1 + xxspltw 32+2, 16, 2 + xxspltw 32+3, 16, 3 + + xxspltw 32+4, 17, 0 + xxspltw 32+5, 17, 1 + xxspltw 32+6, 17, 2 + xxspltw 32+7, 17, 3 + xxspltw 32+8, 18, 0 + xxspltw 32+9, 18, 1 + xxspltw 32+10, 18, 2 + xxspltw 32+11, 18, 3 + xxspltw 32+12, 19, 0 + xxspltw 32+13, 19, 1 + xxspltw 32+14, 19, 2 + xxspltw 32+15, 19, 3 + vadduwm 12, 12, 30 # increase counter + + xxspltw 32+16, 16, 0 + xxspltw 32+17, 16, 1 + xxspltw 32+18, 16, 2 + xxspltw 32+19, 16, 3 + + xxspltw 32+20, 17, 0 + xxspltw 32+21, 17, 1 + xxspltw 32+22, 17, 2 + xxspltw 32+23, 17, 3 + xxspltw 32+24, 18, 0 + xxspltw 32+25, 18, 1 + xxspltw 32+26, 18, 2 + xxspltw 32+27, 18, 3 + xxspltw 32+28, 19, 0 + xxspltw 32+29, 19, 1 + vadduwm 28, 28, 31 # increase counter + xxspltw 32+30, 19, 2 + xxspltw 32+31, 19, 3 + +.align 5 +quarter_loop_8x: + QT_loop_8x + + bdnz quarter_loop_8x + + xxlor 0, 32+30, 32+30 + xxlor 32+30, 30, 30 + vadduwm 12, 12, 30 + xxlor 32+30, 0, 0 + TP_4x 0, 1, 2, 3 + TP_4x 4, 5, 6, 7 + TP_4x 8, 9, 10, 11 + TP_4x 12, 13, 14, 15 + + xxlor 0, 48, 48 + xxlor 1, 49, 49 + xxlor 2, 50, 50 + xxlor 3, 51, 51 + xxlor 48, 16, 16 + xxlor 49, 17, 17 + xxlor 50, 18, 18 + xxlor 51, 19, 19 + Add_state 0 + xxlor 48, 0, 0 + xxlor 49, 1, 1 + xxlor 50, 2, 2 + xxlor 51, 3, 3 + Write_256 0 + addi 14, 14, 256 # offset +=256 + addi 15, 15, -256 # len -=256 + + xxlor 5, 32+31, 32+31 + xxlor 32+31, 31, 31 + vadduwm 28, 28, 31 + xxlor 32+31, 5, 5 + TP_4x 16+0, 16+1, 16+2, 16+3 + TP_4x 16+4, 16+5, 16+6, 16+7 + TP_4x 16+8, 16+9, 16+10, 16+11 + TP_4x 16+12, 16+13, 16+14, 16+15 + + xxlor 32, 16, 16 + xxlor 33, 17, 17 + xxlor 34, 18, 18 + xxlor 35, 19, 19 + Add_state 16 + Write_256 16 + addi 14, 14, 256 # offset +=256 + addi 15, 15, -256 # len +=256 + + xxlor 32+24, 24, 24 + xxlor 32+25, 25, 25 + xxlor 32+30, 30, 30 + vadduwm 30, 30, 25 + vadduwm 31, 30, 24 + xxlor 30, 32+30, 32+30 + xxlor 31, 32+31, 32+31 + + cmpdi 15, 0 + beq Out_loop + + cmpdi 15, 512 + blt Loop_last + + mtctr 8 + b Loop_8x + +Loop_last: + lxvw4x 48, 0, 3 # vr16, constants + lxvw4x 49, 17, 3 # vr17, key 1 + lxvw4x 50, 18, 3 # vr18, key 2 + lxvw4x 51, 19, 3 # vr19, counter, nonce + + vspltisw 21, 12 + vspltisw 23, 7 + addis 11, 2, permx@toc@ha + addi 11, 11, permx@toc@l + lxvw4x 32+20, 0, 11 + lxvw4x 32+22, 17, 11 + + sradi 8, 7, 1 + mtctr 8 + +Loop_4x: + vspltw 0, 16, 0 
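+	# Loop_last / Loop_4x: tail path taken when fewer than 512 bytes remain.
+	# Only four blocks are kept in flight (v0-v15): each vspltw broadcasts one
+	# word of the reloaded state rows in v16-v19, and adding v30 to the counter
+	# word gives the four lanes consecutive block counters.  Each pass produces
+	# 256 bytes; anything shorter than 256 bytes is left to the caller.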
+ vspltw 1, 16, 1 + vspltw 2, 16, 2 + vspltw 3, 16, 3 + + vspltw 4, 17, 0 + vspltw 5, 17, 1 + vspltw 6, 17, 2 + vspltw 7, 17, 3 + vspltw 8, 18, 0 + vspltw 9, 18, 1 + vspltw 10, 18, 2 + vspltw 11, 18, 3 + vspltw 12, 19, 0 + vadduwm 12, 12, 30 # increase counter + vspltw 13, 19, 1 + vspltw 14, 19, 2 + vspltw 15, 19, 3 + +.align 5 +quarter_loop: + QT_loop_4x + + bdnz quarter_loop + + vadduwm 12, 12, 30 + TP_4x 0, 1, 2, 3 + TP_4x 4, 5, 6, 7 + TP_4x 8, 9, 10, 11 + TP_4x 12, 13, 14, 15 + + Add_state 0 + Write_256 0 + addi 14, 14, 256 # offset += 256 + addi 15, 15, -256 # len += 256 + + # Update state counter + vspltisw 25, 4 + vadduwm 30, 30, 25 + + cmpdi 15, 0 + beq Out_loop + cmpdi 15, 256 + blt Out_loop + + mtctr 8 + b Loop_4x + +Out_loop: + RESTORE_REGS + blr + +Out_no_chacha: + li 3, 0 + blr +SYM_FUNC_END(chacha_p10le_8x) + +SYM_DATA_START_LOCAL(PERMX) +.align 5 +permx: +.long 0x22330011, 0x66774455, 0xaabb8899, 0xeeffccdd +.long 0x11223300, 0x55667744, 0x99aabb88, 0xddeeffcc +SYM_DATA_END(PERMX) From a09450e59c5a93aa13164a3024f9c1ba02ced874 Mon Sep 17 00:00:00 2001 From: Danny Tsen Date: Wed, 26 Apr 2023 15:11:44 -0400 Subject: [PATCH 002/149] crypt: chacha20-p10 - Glue code for optmized Chacha20 implementation for ppc64le Signed-off-by: Danny Tsen Signed-off-by: Herbert Xu --- arch/powerpc/crypto/chacha-p10-glue.c | 221 ++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 arch/powerpc/crypto/chacha-p10-glue.c diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c new file mode 100644 index 000000000000..74fb86b0d209 --- /dev/null +++ b/arch/powerpc/crypto/chacha-p10-glue.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers, + * including ChaCha20 (RFC7539) + * + * Copyright 2023- IBM Corp. All rights reserved. 
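+ *
+ * chacha_p10_do_8x() below hands whole 256-byte (four-block) multiples to
+ * the chacha_p10le_8x() assembly core and finishes any remainder with the
+ * generic C code; chacha_crypt_arch() additionally splits work into 4 KiB
+ * chunks between vsx_begin()/vsx_end() to keep the preempt-disabled
+ * window short.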
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src, + unsigned int len, int nrounds); + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10); + +static void vsx_begin(void) +{ + preempt_disable(); + enable_kernel_vsx(); +} + +static void vsx_end(void) +{ + disable_kernel_vsx(); + preempt_enable(); +} + +static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src, + unsigned int bytes, int nrounds) +{ + unsigned int l = bytes & ~0x0FF; + + if (l > 0) { + chacha_p10le_8x(state, dst, src, l, nrounds); + bytes -= l; + src += l; + dst += l; + state[12] += l / CHACHA_BLOCK_SIZE; + } + + if (bytes > 0) + chacha_crypt_generic(state, dst, src, bytes, nrounds); +} + +void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) +{ + hchacha_block_generic(state, stream, nrounds); +} +EXPORT_SYMBOL(hchacha_block_arch); + +void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) +{ + chacha_init_generic(state, key, iv); +} +EXPORT_SYMBOL(chacha_init_arch); + +void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, + int nrounds) +{ + if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE || + !crypto_simd_usable()) + return chacha_crypt_generic(state, dst, src, bytes, nrounds); + + do { + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); + + vsx_begin(); + chacha_p10_do_8x(state, dst, src, todo, nrounds); + vsx_end(); + + bytes -= todo; + src += todo; + dst += todo; + } while (bytes); +} +EXPORT_SYMBOL(chacha_crypt_arch); + +static int chacha_p10_stream_xor(struct skcipher_request *req, + const struct chacha_ctx *ctx, const u8 *iv) +{ + struct skcipher_walk walk; + u32 state[16]; + int err; + + err = skcipher_walk_virt(&walk, req, false); + if (err) + return err; + + chacha_init_generic(state, ctx->key, iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = rounddown(nbytes, walk.stride); + + if (!crypto_simd_usable()) { + chacha_crypt_generic(state, walk.dst.virt.addr, + walk.src.virt.addr, nbytes, + ctx->nrounds); + } else { + vsx_begin(); + chacha_p10_do_8x(state, walk.dst.virt.addr, + walk.src.virt.addr, nbytes, ctx->nrounds); + vsx_end(); + } + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + if (err) + break; + } + + return err; +} + +static int chacha_p10(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + + return chacha_p10_stream_xor(req, ctx, req->iv); +} + +static int xchacha_p10(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); + struct chacha_ctx subctx; + u32 state[16]; + u8 real_iv[16]; + + chacha_init_generic(state, ctx->key, req->iv); + hchacha_block_arch(state, subctx.key, ctx->nrounds); + subctx.nrounds = ctx->nrounds; + + memcpy(&real_iv[0], req->iv + 24, 8); + memcpy(&real_iv[8], req->iv + 16, 8); + return chacha_p10_stream_xor(req, &subctx, real_iv); +} + +static struct skcipher_alg algs[] = { + { + .base.cra_name = "chacha20", + .base.cra_driver_name = "chacha20-p10", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = CHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + 
.setkey = chacha20_setkey, + .encrypt = chacha_p10, + .decrypt = chacha_p10, + }, { + .base.cra_name = "xchacha20", + .base.cra_driver_name = "xchacha20-p10", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha20_setkey, + .encrypt = xchacha_p10, + .decrypt = xchacha_p10, + }, { + .base.cra_name = "xchacha12", + .base.cra_driver_name = "xchacha12-p10", + .base.cra_priority = 300, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct chacha_ctx), + .base.cra_module = THIS_MODULE, + + .min_keysize = CHACHA_KEY_SIZE, + .max_keysize = CHACHA_KEY_SIZE, + .ivsize = XCHACHA_IV_SIZE, + .chunksize = CHACHA_BLOCK_SIZE, + .setkey = chacha12_setkey, + .encrypt = xchacha_p10, + .decrypt = xchacha_p10, + } +}; + +static int __init chacha_p10_init(void) +{ + static_branch_enable(&have_p10); + + return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); +} + +static void __exit chacha_p10_exit(void) +{ + crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); +} + +module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init); +module_exit(chacha_p10_exit); + +MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)"); +MODULE_AUTHOR("Danny Tsen "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("chacha20"); +MODULE_ALIAS_CRYPTO("chacha20-p10"); +MODULE_ALIAS_CRYPTO("xchacha20"); +MODULE_ALIAS_CRYPTO("xchacha20-p10"); +MODULE_ALIAS_CRYPTO("xchacha12"); +MODULE_ALIAS_CRYPTO("xchacha12-p10"); From 09ef057bd2a125e01fe841c4f40ca9fb97cc5cd4 Mon Sep 17 00:00:00 2001 From: Danny Tsen Date: Wed, 26 Apr 2023 15:11:45 -0400 Subject: [PATCH 003/149] crypto: poly1305-p10 - An optimized Poly1305 implementation with 4-way unrolling for ppc64le Improve overall performance of Poly1305 for Power10 or later CPU. Signed-off-by: Danny Tsen Signed-off-by: Herbert Xu --- arch/powerpc/crypto/poly1305-p10le_64.S | 1075 +++++++++++++++++++++++ 1 file changed, 1075 insertions(+) create mode 100644 arch/powerpc/crypto/poly1305-p10le_64.S diff --git a/arch/powerpc/crypto/poly1305-p10le_64.S b/arch/powerpc/crypto/poly1305-p10le_64.S new file mode 100644 index 000000000000..a3c1987f1ecd --- /dev/null +++ b/arch/powerpc/crypto/poly1305-p10le_64.S @@ -0,0 +1,1075 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +# +# Accelerated poly1305 implementation for ppc64le. +# +# Copyright 2023- IBM Corp. All rights reserved +# +#=================================================================================== +# Written by Danny Tsen +# +# Poly1305 - this version mainly using vector/VSX/Scalar +# - 26 bits limbs +# - Handle multiple 64 byte blcok. +# +# Block size 16 bytes +# key = (r, s) +# clamp r &= 0x0FFFFFFC0FFFFFFC 0x0FFFFFFC0FFFFFFF +# p = 2^130 - 5 +# a += m +# a = (r + a) % p +# a += s +# +# Improve performance by breaking down polynominal to the sum of products with +# h4 = m1 * r⁴ + m2 * r³ + m3 * r² + m4 * r +# +# 07/22/21 - this revison based on the above sum of products. Setup r^4, r^3, r^2, r and s3, s2, s1, s0 +# to 9 vectors for multiplications. +# +# setup r^4, r^3, r^2, r vectors +# vs [r^1, r^3, r^2, r^4] +# vs0 = [r0,.....] +# vs1 = [r1,.....] +# vs2 = [r2,.....] +# vs3 = [r3,.....] +# vs4 = [r4,.....] +# vs5 = [r1*5,...] +# vs6 = [r2*5,...] +# vs7 = [r2*5,...] +# vs8 = [r4*5,...] +# +# Each word in a vector consists a member of a "r/s" in [a * r/s]. 
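+#
+# The *5 multiples exist because the limbs are base 2^26 and p = 2^130 - 5:
+# 2^130 = 5 (mod p), so any schoolbook product term whose combined limb
+# weight reaches 2^130 wraps around into the low limbs multiplied by 5,
+# which is why r1*5 .. r4*5 are precomputed alongside r0 .. r4.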
+# +# r0, r4*5, r3*5, r2*5, r1*5; +# r1, r0, r4*5, r3*5, r2*5; +# r2, r1, r0, r4*5, r3*5; +# r3, r2, r1, r0, r4*5; +# r4, r3, r2, r1, r0 ; +# +# +# poly1305_p10le_4blocks( uint8_t *k, uint32_t mlen, uint8_t *m) +# k = 32 bytes key +# r3 = k (r, s) +# r4 = mlen +# r5 = m +# +#include +#include +#include +#include + +.machine "any" + +.text + +.macro SAVE_GPR GPR OFFSET FRAME + std \GPR,\OFFSET(\FRAME) +.endm + +.macro SAVE_VRS VRS OFFSET FRAME + li 16, \OFFSET + stvx \VRS, 16, \FRAME +.endm + +.macro SAVE_VSX VSX OFFSET FRAME + li 16, \OFFSET + stxvx \VSX, 16, \FRAME +.endm + +.macro RESTORE_GPR GPR OFFSET FRAME + ld \GPR,\OFFSET(\FRAME) +.endm + +.macro RESTORE_VRS VRS OFFSET FRAME + li 16, \OFFSET + lvx \VRS, 16, \FRAME +.endm + +.macro RESTORE_VSX VSX OFFSET FRAME + li 16, \OFFSET + lxvx \VSX, 16, \FRAME +.endm + +.macro SAVE_REGS + mflr 0 + std 0, 16(1) + stdu 1,-752(1) + + SAVE_GPR 14, 112, 1 + SAVE_GPR 15, 120, 1 + SAVE_GPR 16, 128, 1 + SAVE_GPR 17, 136, 1 + SAVE_GPR 18, 144, 1 + SAVE_GPR 19, 152, 1 + SAVE_GPR 20, 160, 1 + SAVE_GPR 21, 168, 1 + SAVE_GPR 22, 176, 1 + SAVE_GPR 23, 184, 1 + SAVE_GPR 24, 192, 1 + SAVE_GPR 25, 200, 1 + SAVE_GPR 26, 208, 1 + SAVE_GPR 27, 216, 1 + SAVE_GPR 28, 224, 1 + SAVE_GPR 29, 232, 1 + SAVE_GPR 30, 240, 1 + SAVE_GPR 31, 248, 1 + + addi 9, 1, 256 + SAVE_VRS 20, 0, 9 + SAVE_VRS 21, 16, 9 + SAVE_VRS 22, 32, 9 + SAVE_VRS 23, 48, 9 + SAVE_VRS 24, 64, 9 + SAVE_VRS 25, 80, 9 + SAVE_VRS 26, 96, 9 + SAVE_VRS 27, 112, 9 + SAVE_VRS 28, 128, 9 + SAVE_VRS 29, 144, 9 + SAVE_VRS 30, 160, 9 + SAVE_VRS 31, 176, 9 + + SAVE_VSX 14, 192, 9 + SAVE_VSX 15, 208, 9 + SAVE_VSX 16, 224, 9 + SAVE_VSX 17, 240, 9 + SAVE_VSX 18, 256, 9 + SAVE_VSX 19, 272, 9 + SAVE_VSX 20, 288, 9 + SAVE_VSX 21, 304, 9 + SAVE_VSX 22, 320, 9 + SAVE_VSX 23, 336, 9 + SAVE_VSX 24, 352, 9 + SAVE_VSX 25, 368, 9 + SAVE_VSX 26, 384, 9 + SAVE_VSX 27, 400, 9 + SAVE_VSX 28, 416, 9 + SAVE_VSX 29, 432, 9 + SAVE_VSX 30, 448, 9 + SAVE_VSX 31, 464, 9 +.endm # SAVE_REGS + +.macro RESTORE_REGS + addi 9, 1, 256 + RESTORE_VRS 20, 0, 9 + RESTORE_VRS 21, 16, 9 + RESTORE_VRS 22, 32, 9 + RESTORE_VRS 23, 48, 9 + RESTORE_VRS 24, 64, 9 + RESTORE_VRS 25, 80, 9 + RESTORE_VRS 26, 96, 9 + RESTORE_VRS 27, 112, 9 + RESTORE_VRS 28, 128, 9 + RESTORE_VRS 29, 144, 9 + RESTORE_VRS 30, 160, 9 + RESTORE_VRS 31, 176, 9 + + RESTORE_VSX 14, 192, 9 + RESTORE_VSX 15, 208, 9 + RESTORE_VSX 16, 224, 9 + RESTORE_VSX 17, 240, 9 + RESTORE_VSX 18, 256, 9 + RESTORE_VSX 19, 272, 9 + RESTORE_VSX 20, 288, 9 + RESTORE_VSX 21, 304, 9 + RESTORE_VSX 22, 320, 9 + RESTORE_VSX 23, 336, 9 + RESTORE_VSX 24, 352, 9 + RESTORE_VSX 25, 368, 9 + RESTORE_VSX 26, 384, 9 + RESTORE_VSX 27, 400, 9 + RESTORE_VSX 28, 416, 9 + RESTORE_VSX 29, 432, 9 + RESTORE_VSX 30, 448, 9 + RESTORE_VSX 31, 464, 9 + + RESTORE_GPR 14, 112, 1 + RESTORE_GPR 15, 120, 1 + RESTORE_GPR 16, 128, 1 + RESTORE_GPR 17, 136, 1 + RESTORE_GPR 18, 144, 1 + RESTORE_GPR 19, 152, 1 + RESTORE_GPR 20, 160, 1 + RESTORE_GPR 21, 168, 1 + RESTORE_GPR 22, 176, 1 + RESTORE_GPR 23, 184, 1 + RESTORE_GPR 24, 192, 1 + RESTORE_GPR 25, 200, 1 + RESTORE_GPR 26, 208, 1 + RESTORE_GPR 27, 216, 1 + RESTORE_GPR 28, 224, 1 + RESTORE_GPR 29, 232, 1 + RESTORE_GPR 30, 240, 1 + RESTORE_GPR 31, 248, 1 + + addi 1, 1, 752 + ld 0, 16(1) + mtlr 0 +.endm # RESTORE_REGS + +# +# p[0] = a0*r0 + a1*r4*5 + a2*r3*5 + a3*r2*5 + a4*r1*5; +# p[1] = a0*r1 + a1*r0 + a2*r4*5 + a3*r3*5 + a4*r2*5; +# p[2] = a0*r2 + a1*r1 + a2*r0 + a3*r4*5 + a4*r3*5; +# p[3] = a0*r3 + a1*r2 + a2*r1 + a3*r0 + a4*r4*5; +# p[4] = a0*r4 + a1*r3 + a2*r2 + a3*r1 + a4*r0 ; +# +# 
[r^2, r^3, r^1, r^4] +# [m3, m2, m4, m1] +# +# multiply odd and even words +.macro mul_odd + vmulouw 14, 4, 26 + vmulouw 10, 5, 3 + vmulouw 11, 6, 2 + vmulouw 12, 7, 1 + vmulouw 13, 8, 0 + vmulouw 15, 4, 27 + vaddudm 14, 14, 10 + vaddudm 14, 14, 11 + vmulouw 10, 5, 26 + vmulouw 11, 6, 3 + vaddudm 14, 14, 12 + vaddudm 14, 14, 13 # x0 + vaddudm 15, 15, 10 + vaddudm 15, 15, 11 + vmulouw 12, 7, 2 + vmulouw 13, 8, 1 + vaddudm 15, 15, 12 + vaddudm 15, 15, 13 # x1 + vmulouw 16, 4, 28 + vmulouw 10, 5, 27 + vmulouw 11, 6, 26 + vaddudm 16, 16, 10 + vaddudm 16, 16, 11 + vmulouw 12, 7, 3 + vmulouw 13, 8, 2 + vaddudm 16, 16, 12 + vaddudm 16, 16, 13 # x2 + vmulouw 17, 4, 29 + vmulouw 10, 5, 28 + vmulouw 11, 6, 27 + vaddudm 17, 17, 10 + vaddudm 17, 17, 11 + vmulouw 12, 7, 26 + vmulouw 13, 8, 3 + vaddudm 17, 17, 12 + vaddudm 17, 17, 13 # x3 + vmulouw 18, 4, 30 + vmulouw 10, 5, 29 + vmulouw 11, 6, 28 + vaddudm 18, 18, 10 + vaddudm 18, 18, 11 + vmulouw 12, 7, 27 + vmulouw 13, 8, 26 + vaddudm 18, 18, 12 + vaddudm 18, 18, 13 # x4 +.endm + +.macro mul_even + vmuleuw 9, 4, 26 + vmuleuw 10, 5, 3 + vmuleuw 11, 6, 2 + vmuleuw 12, 7, 1 + vmuleuw 13, 8, 0 + vaddudm 14, 14, 9 + vaddudm 14, 14, 10 + vaddudm 14, 14, 11 + vaddudm 14, 14, 12 + vaddudm 14, 14, 13 # x0 + + vmuleuw 9, 4, 27 + vmuleuw 10, 5, 26 + vmuleuw 11, 6, 3 + vmuleuw 12, 7, 2 + vmuleuw 13, 8, 1 + vaddudm 15, 15, 9 + vaddudm 15, 15, 10 + vaddudm 15, 15, 11 + vaddudm 15, 15, 12 + vaddudm 15, 15, 13 # x1 + + vmuleuw 9, 4, 28 + vmuleuw 10, 5, 27 + vmuleuw 11, 6, 26 + vmuleuw 12, 7, 3 + vmuleuw 13, 8, 2 + vaddudm 16, 16, 9 + vaddudm 16, 16, 10 + vaddudm 16, 16, 11 + vaddudm 16, 16, 12 + vaddudm 16, 16, 13 # x2 + + vmuleuw 9, 4, 29 + vmuleuw 10, 5, 28 + vmuleuw 11, 6, 27 + vmuleuw 12, 7, 26 + vmuleuw 13, 8, 3 + vaddudm 17, 17, 9 + vaddudm 17, 17, 10 + vaddudm 17, 17, 11 + vaddudm 17, 17, 12 + vaddudm 17, 17, 13 # x3 + + vmuleuw 9, 4, 30 + vmuleuw 10, 5, 29 + vmuleuw 11, 6, 28 + vmuleuw 12, 7, 27 + vmuleuw 13, 8, 26 + vaddudm 18, 18, 9 + vaddudm 18, 18, 10 + vaddudm 18, 18, 11 + vaddudm 18, 18, 12 + vaddudm 18, 18, 13 # x4 +.endm + +# +# poly1305_setup_r +# +# setup r^4, r^3, r^2, r vectors +# [r, r^3, r^2, r^4] +# vs0 = [r0,...] +# vs1 = [r1,...] +# vs2 = [r2,...] +# vs3 = [r3,...] +# vs4 = [r4,...] +# vs5 = [r4*5,...] +# vs6 = [r3*5,...] +# vs7 = [r2*5,...] +# vs8 = [r1*5,...] 
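+#
+# The matching message vectors are interleaved as [m4, m2, m3, m1]
+# (see "Smash 4 message blocks" below), so lane by lane each block meets
+# the power of r it needs, giving the file-header form
+#     h = m1*r^4 + m2*r^3 + m3*r^2 + m4*r   (mod 2^130 - 5)
+# once the partial products are summed and carry-reduced.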
+# +# r0, r4*5, r3*5, r2*5, r1*5; +# r1, r0, r4*5, r3*5, r2*5; +# r2, r1, r0, r4*5, r3*5; +# r3, r2, r1, r0, r4*5; +# r4, r3, r2, r1, r0 ; +# +.macro poly1305_setup_r + + # save r + xxlor 26, 58, 58 + xxlor 27, 59, 59 + xxlor 28, 60, 60 + xxlor 29, 61, 61 + xxlor 30, 62, 62 + + xxlxor 31, 31, 31 + +# [r, r^3, r^2, r^4] + # compute r^2 + vmr 4, 26 + vmr 5, 27 + vmr 6, 28 + vmr 7, 29 + vmr 8, 30 + bl do_mul # r^2 r^1 + xxpermdi 58, 58, 36, 0x3 # r0 + xxpermdi 59, 59, 37, 0x3 # r1 + xxpermdi 60, 60, 38, 0x3 # r2 + xxpermdi 61, 61, 39, 0x3 # r3 + xxpermdi 62, 62, 40, 0x3 # r4 + xxpermdi 36, 36, 36, 0x3 + xxpermdi 37, 37, 37, 0x3 + xxpermdi 38, 38, 38, 0x3 + xxpermdi 39, 39, 39, 0x3 + xxpermdi 40, 40, 40, 0x3 + vspltisb 13, 2 + vsld 9, 27, 13 + vsld 10, 28, 13 + vsld 11, 29, 13 + vsld 12, 30, 13 + vaddudm 0, 9, 27 + vaddudm 1, 10, 28 + vaddudm 2, 11, 29 + vaddudm 3, 12, 30 + + bl do_mul # r^4 r^3 + vmrgow 26, 26, 4 + vmrgow 27, 27, 5 + vmrgow 28, 28, 6 + vmrgow 29, 29, 7 + vmrgow 30, 30, 8 + vspltisb 13, 2 + vsld 9, 27, 13 + vsld 10, 28, 13 + vsld 11, 29, 13 + vsld 12, 30, 13 + vaddudm 0, 9, 27 + vaddudm 1, 10, 28 + vaddudm 2, 11, 29 + vaddudm 3, 12, 30 + + # r^2 r^4 + xxlor 0, 58, 58 + xxlor 1, 59, 59 + xxlor 2, 60, 60 + xxlor 3, 61, 61 + xxlor 4, 62, 62 + xxlor 5, 32, 32 + xxlor 6, 33, 33 + xxlor 7, 34, 34 + xxlor 8, 35, 35 + + vspltw 9, 26, 3 + vspltw 10, 26, 2 + vmrgow 26, 10, 9 + vspltw 9, 27, 3 + vspltw 10, 27, 2 + vmrgow 27, 10, 9 + vspltw 9, 28, 3 + vspltw 10, 28, 2 + vmrgow 28, 10, 9 + vspltw 9, 29, 3 + vspltw 10, 29, 2 + vmrgow 29, 10, 9 + vspltw 9, 30, 3 + vspltw 10, 30, 2 + vmrgow 30, 10, 9 + + vsld 9, 27, 13 + vsld 10, 28, 13 + vsld 11, 29, 13 + vsld 12, 30, 13 + vaddudm 0, 9, 27 + vaddudm 1, 10, 28 + vaddudm 2, 11, 29 + vaddudm 3, 12, 30 +.endm + +SYM_FUNC_START_LOCAL(do_mul) + mul_odd + + # do reduction ( h %= p ) + # carry reduction + vspltisb 9, 2 + vsrd 10, 14, 31 + vsrd 11, 17, 31 + vand 7, 17, 25 + vand 4, 14, 25 + vaddudm 18, 18, 11 + vsrd 12, 18, 31 + vaddudm 15, 15, 10 + + vsrd 11, 15, 31 + vand 8, 18, 25 + vand 5, 15, 25 + vaddudm 4, 4, 12 + vsld 10, 12, 9 + vaddudm 6, 16, 11 + + vsrd 13, 6, 31 + vand 6, 6, 25 + vaddudm 4, 4, 10 + vsrd 10, 4, 31 + vaddudm 7, 7, 13 + + vsrd 11, 7, 31 + vand 7, 7, 25 + vand 4, 4, 25 + vaddudm 5, 5, 10 + vaddudm 8, 8, 11 + blr +SYM_FUNC_END(do_mul) + +# +# init key +# +.macro do_poly1305_init + addis 10, 2, rmask@toc@ha + addi 10, 10, rmask@toc@l + + ld 11, 0(10) + ld 12, 8(10) + + li 14, 16 + li 15, 32 + addis 10, 2, cnum@toc@ha + addi 10, 10, cnum@toc@l + lvx 25, 0, 10 # v25 - mask + lvx 31, 14, 10 # v31 = 1a + lvx 19, 15, 10 # v19 = 1 << 24 + lxv 24, 48(10) # vs24 + lxv 25, 64(10) # vs25 + + # initialize + # load key from r3 to vectors + ld 9, 24(3) + ld 10, 32(3) + and. 9, 9, 11 + and. 
10, 10, 12 + + # break 26 bits + extrdi 14, 9, 26, 38 + extrdi 15, 9, 26, 12 + extrdi 16, 9, 12, 0 + mtvsrdd 58, 0, 14 + insrdi 16, 10, 14, 38 + mtvsrdd 59, 0, 15 + extrdi 17, 10, 26, 24 + mtvsrdd 60, 0, 16 + extrdi 18, 10, 24, 0 + mtvsrdd 61, 0, 17 + mtvsrdd 62, 0, 18 + + # r1 = r1 * 5, r2 = r2 * 5, r3 = r3 * 5, r4 = r4 * 5 + li 9, 5 + mtvsrdd 36, 0, 9 + vmulouw 0, 27, 4 # v0 = rr0 + vmulouw 1, 28, 4 # v1 = rr1 + vmulouw 2, 29, 4 # v2 = rr2 + vmulouw 3, 30, 4 # v3 = rr3 +.endm + +# +# poly1305_p10le_4blocks( uint8_t *k, uint32_t mlen, uint8_t *m) +# k = 32 bytes key +# r3 = k (r, s) +# r4 = mlen +# r5 = m +# +SYM_FUNC_START(poly1305_p10le_4blocks) +.align 5 + cmpdi 5, 64 + blt Out_no_poly1305 + + SAVE_REGS + + do_poly1305_init + + li 21, 0 # counter to message + + poly1305_setup_r + + # load previous H state + # break/convert r6 to 26 bits + ld 9, 0(3) + ld 10, 8(3) + ld 19, 16(3) + sldi 19, 19, 24 + mtvsrdd 41, 0, 19 + extrdi 14, 9, 26, 38 + extrdi 15, 9, 26, 12 + extrdi 16, 9, 12, 0 + mtvsrdd 36, 0, 14 + insrdi 16, 10, 14, 38 + mtvsrdd 37, 0, 15 + extrdi 17, 10, 26, 24 + mtvsrdd 38, 0, 16 + extrdi 18, 10, 24, 0 + mtvsrdd 39, 0, 17 + mtvsrdd 40, 0, 18 + vor 8, 8, 9 + + # input m1 m2 + add 20, 4, 21 + xxlor 49, 24, 24 + xxlor 50, 25, 25 + lxvw4x 43, 0, 20 + addi 17, 20, 16 + lxvw4x 44, 0, 17 + vperm 14, 11, 12, 17 + vperm 15, 11, 12, 18 + vand 9, 14, 25 # a0 + vsrd 10, 14, 31 # >> 26 + vsrd 11, 10, 31 # 12 bits left + vand 10, 10, 25 # a1 + vspltisb 13, 12 + vand 16, 15, 25 + vsld 12, 16, 13 + vor 11, 11, 12 + vand 11, 11, 25 # a2 + vspltisb 13, 14 + vsrd 12, 15, 13 # >> 14 + vsrd 13, 12, 31 # >> 26, a4 + vand 12, 12, 25 # a3 + + vaddudm 20, 4, 9 + vaddudm 21, 5, 10 + vaddudm 22, 6, 11 + vaddudm 23, 7, 12 + vaddudm 24, 8, 13 + + # m3 m4 + addi 17, 17, 16 + lxvw4x 43, 0, 17 + addi 17, 17, 16 + lxvw4x 44, 0, 17 + vperm 14, 11, 12, 17 + vperm 15, 11, 12, 18 + vand 9, 14, 25 # a0 + vsrd 10, 14, 31 # >> 26 + vsrd 11, 10, 31 # 12 bits left + vand 10, 10, 25 # a1 + vspltisb 13, 12 + vand 16, 15, 25 + vsld 12, 16, 13 + vspltisb 13, 14 + vor 11, 11, 12 + vand 11, 11, 25 # a2 + vsrd 12, 15, 13 # >> 14 + vsrd 13, 12, 31 # >> 26, a4 + vand 12, 12, 25 # a3 + + # Smash 4 message blocks into 5 vectors of [m4, m2, m3, m1] + vmrgow 4, 9, 20 + vmrgow 5, 10, 21 + vmrgow 6, 11, 22 + vmrgow 7, 12, 23 + vmrgow 8, 13, 24 + vaddudm 8, 8, 19 + + addi 5, 5, -64 # len -= 64 + addi 21, 21, 64 # offset += 64 + + li 9, 64 + divdu 31, 5, 9 + + cmpdi 31, 0 + ble Skip_block_loop + + mtctr 31 + +# h4 = m1 * r⁴ + m2 * r³ + m3 * r² + m4 * r +# Rewrite the polynominal sum of product as follows, +# h1 = (h0 + m1) * r^2, h2 = (h0 + m2) * r^2 +# h3 = (h1 + m3) * r^2, h4 = (h2 + m4) * r^2 --> (h0 + m1) r*4 + (h3 + m3) r^2, (h0 + m2) r^4 + (h0 + m4) r^2 +# .... 
Repeat +# h5 = (h3 + m5) * r^2, h6 = (h4 + m6) * r^2 --> +# h7 = (h5 + m7) * r^2, h8 = (h6 + m8) * r^1 --> m5 * r^4 + m6 * r^3 + m7 * r^2 + m8 * r +# +loop_4blocks: + + # Multiply odd words and even words + mul_odd + mul_even + # carry reduction + vspltisb 9, 2 + vsrd 10, 14, 31 + vsrd 11, 17, 31 + vand 7, 17, 25 + vand 4, 14, 25 + vaddudm 18, 18, 11 + vsrd 12, 18, 31 + vaddudm 15, 15, 10 + + vsrd 11, 15, 31 + vand 8, 18, 25 + vand 5, 15, 25 + vaddudm 4, 4, 12 + vsld 10, 12, 9 + vaddudm 6, 16, 11 + + vsrd 13, 6, 31 + vand 6, 6, 25 + vaddudm 4, 4, 10 + vsrd 10, 4, 31 + vaddudm 7, 7, 13 + + vsrd 11, 7, 31 + vand 7, 7, 25 + vand 4, 4, 25 + vaddudm 5, 5, 10 + vaddudm 8, 8, 11 + + # input m1 m2 m3 m4 + add 20, 4, 21 + xxlor 49, 24, 24 + xxlor 50, 25, 25 + lxvw4x 43, 0, 20 + addi 17, 20, 16 + lxvw4x 44, 0, 17 + vperm 14, 11, 12, 17 + vperm 15, 11, 12, 18 + addi 17, 17, 16 + lxvw4x 43, 0, 17 + addi 17, 17, 16 + lxvw4x 44, 0, 17 + vperm 17, 11, 12, 17 + vperm 18, 11, 12, 18 + + vand 20, 14, 25 # a0 + vand 9, 17, 25 # a0 + vsrd 21, 14, 31 # >> 26 + vsrd 22, 21, 31 # 12 bits left + vsrd 10, 17, 31 # >> 26 + vsrd 11, 10, 31 # 12 bits left + + vand 21, 21, 25 # a1 + vand 10, 10, 25 # a1 + + vspltisb 13, 12 + vand 16, 15, 25 + vsld 23, 16, 13 + vor 22, 22, 23 + vand 22, 22, 25 # a2 + vand 16, 18, 25 + vsld 12, 16, 13 + vor 11, 11, 12 + vand 11, 11, 25 # a2 + vspltisb 13, 14 + vsrd 23, 15, 13 # >> 14 + vsrd 24, 23, 31 # >> 26, a4 + vand 23, 23, 25 # a3 + vsrd 12, 18, 13 # >> 14 + vsrd 13, 12, 31 # >> 26, a4 + vand 12, 12, 25 # a3 + + vaddudm 4, 4, 20 + vaddudm 5, 5, 21 + vaddudm 6, 6, 22 + vaddudm 7, 7, 23 + vaddudm 8, 8, 24 + + # Smash 4 message blocks into 5 vectors of [m4, m2, m3, m1] + vmrgow 4, 9, 4 + vmrgow 5, 10, 5 + vmrgow 6, 11, 6 + vmrgow 7, 12, 7 + vmrgow 8, 13, 8 + vaddudm 8, 8, 19 + + addi 5, 5, -64 # len -= 64 + addi 21, 21, 64 # offset += 64 + + bdnz loop_4blocks + +Skip_block_loop: + xxlor 58, 0, 0 + xxlor 59, 1, 1 + xxlor 60, 2, 2 + xxlor 61, 3, 3 + xxlor 62, 4, 4 + xxlor 32, 5, 5 + xxlor 33, 6, 6 + xxlor 34, 7, 7 + xxlor 35, 8, 8 + + # Multiply odd words and even words + mul_odd + mul_even + + # Sum the products. 
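+	# The products still sit as two 64-bit halves per limb; the xxpermdi/vaddudm
+	# sequence folds them into v4-v8.  The carry reduction that follows keeps
+	# each limb to 26 bits: carry = limb >> 26 moves up to the next limb, and
+	# the carry out of the top limb re-enters limb 0 multiplied by 5
+	# (added once directly and once shifted left by 2), since 2^130 = 5 mod p.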
+ xxpermdi 41, 31, 46, 0 + xxpermdi 42, 31, 47, 0 + vaddudm 4, 14, 9 + xxpermdi 36, 31, 36, 3 + vaddudm 5, 15, 10 + xxpermdi 37, 31, 37, 3 + xxpermdi 43, 31, 48, 0 + vaddudm 6, 16, 11 + xxpermdi 38, 31, 38, 3 + xxpermdi 44, 31, 49, 0 + vaddudm 7, 17, 12 + xxpermdi 39, 31, 39, 3 + xxpermdi 45, 31, 50, 0 + vaddudm 8, 18, 13 + xxpermdi 40, 31, 40, 3 + + # carry reduction + vspltisb 9, 2 + vsrd 10, 4, 31 + vsrd 11, 7, 31 + vand 7, 7, 25 + vand 4, 4, 25 + vaddudm 8, 8, 11 + vsrd 12, 8, 31 + vaddudm 5, 5, 10 + + vsrd 11, 5, 31 + vand 8, 8, 25 + vand 5, 5, 25 + vaddudm 4, 4, 12 + vsld 10, 12, 9 + vaddudm 6, 6, 11 + + vsrd 13, 6, 31 + vand 6, 6, 25 + vaddudm 4, 4, 10 + vsrd 10, 4, 31 + vaddudm 7, 7, 13 + + vsrd 11, 7, 31 + vand 7, 7, 25 + vand 4, 4, 25 + vaddudm 5, 5, 10 + vsrd 10, 5, 31 + vand 5, 5, 25 + vaddudm 6, 6, 10 + vaddudm 8, 8, 11 + + b do_final_update + +do_final_update: + # combine 26 bit limbs + # v4, v5, v6, v7 and v8 are 26 bit vectors + vsld 5, 5, 31 + vor 20, 4, 5 + vspltisb 11, 12 + vsrd 12, 6, 11 + vsld 6, 6, 31 + vsld 6, 6, 31 + vor 20, 20, 6 + vspltisb 11, 14 + vsld 7, 7, 11 + vor 21, 7, 12 + mfvsrld 16, 40 # save last 2 bytes + vsld 8, 8, 11 + vsld 8, 8, 31 + vor 21, 21, 8 + mfvsrld 17, 52 + mfvsrld 19, 53 + srdi 16, 16, 24 + + std 17, 0(3) + std 19, 8(3) + stw 16, 16(3) + +Out_loop: + li 3, 0 + + RESTORE_REGS + + blr + +Out_no_poly1305: + li 3, 0 + blr +SYM_FUNC_END(poly1305_p10le_4blocks) + +# +# ======================================================================= +# The following functions implement 64 x 64 bits multiplication poly1305. +# +SYM_FUNC_START_LOCAL(Poly1305_init_64) + # mask 0x0FFFFFFC0FFFFFFC + # mask 0x0FFFFFFC0FFFFFFF + addis 10, 2, rmask@toc@ha + addi 10, 10, rmask@toc@l + ld 11, 0(10) + ld 12, 8(10) + + # initialize + # load key from r3 + ld 9, 24(3) + ld 10, 32(3) + and. 9, 9, 11 # cramp mask r0 + and. 10, 10, 12 # cramp mask r1 + + srdi 21, 10, 2 + add 19, 21, 10 # s1: r19 - (r1 >> 2) *5 + + # setup r and s + li 25, 0 + mtvsrdd 32+0, 9, 19 # r0, s1 + mtvsrdd 32+1, 10, 9 # r1, r0 + mtvsrdd 32+2, 19, 25 # s1 + mtvsrdd 32+3, 9, 25 # r0 + + blr +SYM_FUNC_END(Poly1305_init_64) + +# Poly1305_mult +# v6 = (h0, h1), v8 = h2 +# v0 = (r0, s1), v1 = (r1, r0), v2 = s1, v3 = r0 +# +# Output: v7, v10, v11 +# +SYM_FUNC_START_LOCAL(Poly1305_mult) + # + # d0 = h0 * r0 + h1 * s1 + vmsumudm 7, 6, 0, 9 # h0 * r0, h1 * s1 + + # d1 = h0 * r1 + h1 * r0 + h2 * s1 + vmsumudm 11, 6, 1, 9 # h0 * r1, h1 * r0 + vmsumudm 10, 8, 2, 11 # d1 += h2 * s1 + + # d2 = r0 + vmsumudm 11, 8, 3, 9 # d2 = h2 * r0 + blr +SYM_FUNC_END(Poly1305_mult) + +# +# carry reduction +# h %=p +# +# Input: v7, v10, v11 +# Output: r27, r28, r29 +# +SYM_FUNC_START_LOCAL(Carry_reduction) + mfvsrld 27, 32+7 + mfvsrld 28, 32+10 + mfvsrld 29, 32+11 + mfvsrd 20, 32+7 # h0.h + mfvsrd 21, 32+10 # h1.h + + addc 28, 28, 20 + adde 29, 29, 21 + srdi 22, 29, 0x2 + sldi 23, 22, 0x2 + add 23, 23, 22 # (h2 & 3) * 5 + addc 27, 27, 23 # h0 + addze 28, 28 # h1 + andi. 
29, 29, 0x3 # h2 + blr +SYM_FUNC_END(Carry_reduction) + +# +# poly1305 multiplication +# h *= r, h %= p +# d0 = h0 * r0 + h1 * s1 +# d1 = h0 * r1 + h1 * r0 + h2 * s1 +# d2 = h0 * r0 +# +# +# unsigned int poly1305_test_64s(unisgned char *state, const byte *src, size_t len, highbit) +# - no highbit if final leftover block (highbit = 0) +# +SYM_FUNC_START(poly1305_64s) + cmpdi 5, 0 + ble Out_no_poly1305_64 + + mflr 0 + std 0, 16(1) + stdu 1,-400(1) + + SAVE_GPR 14, 112, 1 + SAVE_GPR 15, 120, 1 + SAVE_GPR 16, 128, 1 + SAVE_GPR 17, 136, 1 + SAVE_GPR 18, 144, 1 + SAVE_GPR 19, 152, 1 + SAVE_GPR 20, 160, 1 + SAVE_GPR 21, 168, 1 + SAVE_GPR 22, 176, 1 + SAVE_GPR 23, 184, 1 + SAVE_GPR 24, 192, 1 + SAVE_GPR 25, 200, 1 + SAVE_GPR 26, 208, 1 + SAVE_GPR 27, 216, 1 + SAVE_GPR 28, 224, 1 + SAVE_GPR 29, 232, 1 + SAVE_GPR 30, 240, 1 + SAVE_GPR 31, 248, 1 + + # Init poly1305 + bl Poly1305_init_64 + + li 25, 0 # offset to inp and outp + + add 11, 25, 4 + + # load h + # h0, h1, h2? + ld 27, 0(3) + ld 28, 8(3) + lwz 29, 16(3) + + li 30, 16 + divdu 31, 5, 30 + + mtctr 31 + + mr 24, 6 # highbit + +Loop_block_64: + vxor 9, 9, 9 + + ld 20, 0(11) + ld 21, 8(11) + addi 11, 11, 16 + + addc 27, 27, 20 + adde 28, 28, 21 + adde 29, 29, 24 + + li 22, 0 + mtvsrdd 32+6, 27, 28 # h0, h1 + mtvsrdd 32+8, 29, 22 # h2 + + bl Poly1305_mult + + bl Carry_reduction + + bdnz Loop_block_64 + + std 27, 0(3) + std 28, 8(3) + stw 29, 16(3) + + li 3, 0 + + RESTORE_GPR 14, 112, 1 + RESTORE_GPR 15, 120, 1 + RESTORE_GPR 16, 128, 1 + RESTORE_GPR 17, 136, 1 + RESTORE_GPR 18, 144, 1 + RESTORE_GPR 19, 152, 1 + RESTORE_GPR 20, 160, 1 + RESTORE_GPR 21, 168, 1 + RESTORE_GPR 22, 176, 1 + RESTORE_GPR 23, 184, 1 + RESTORE_GPR 24, 192, 1 + RESTORE_GPR 25, 200, 1 + RESTORE_GPR 26, 208, 1 + RESTORE_GPR 27, 216, 1 + RESTORE_GPR 28, 224, 1 + RESTORE_GPR 29, 232, 1 + RESTORE_GPR 30, 240, 1 + RESTORE_GPR 31, 248, 1 + + addi 1, 1, 400 + ld 0, 16(1) + mtlr 0 + + blr + +Out_no_poly1305_64: + li 3, 0 + blr +SYM_FUNC_END(poly1305_64s) + +# +# Input: r3 = h, r4 = s, r5 = mac +# mac = h + s +# +SYM_FUNC_START(poly1305_emit_64) + ld 10, 0(3) + ld 11, 8(3) + ld 12, 16(3) + + # compare modulus + # h + 5 + (-p) + mr 6, 10 + mr 7, 11 + mr 8, 12 + addic. 6, 6, 5 + addze 7, 7 + addze 8, 8 + srdi 9, 8, 2 # overflow? 
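+	# Final freeze before adding s: t = h + 5 is computed in r6/r7/r8; if the
+	# add carried into bit 130 (checked by shifting the top word right by 2)
+	# then h >= p = 2^130 - 5, and t truncated to 130 bits equals h - p, so t
+	# replaces h.  Otherwise h is already fully reduced.  The tag written to
+	# r5 below is the low 128 bits of (h + s).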
+ cmpdi 9, 0 + beq Skip_h64 + mr 10, 6 + mr 11, 7 + mr 12, 8 + +Skip_h64: + ld 6, 0(4) + ld 7, 8(4) + addc 10, 10, 6 + adde 11, 11, 7 + addze 12, 12 + + std 10, 0(5) + std 11, 8(5) + blr +SYM_FUNC_END(poly1305_emit_64) + +SYM_DATA_START_LOCAL(RMASK) +.align 5 +rmask: +.byte 0xff, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f +cnum: +.long 0x03ffffff, 0x00000000, 0x03ffffff, 0x00000000 +.long 0x1a, 0x00, 0x1a, 0x00 +.long 0x01000000, 0x01000000, 0x01000000, 0x01000000 +.long 0x00010203, 0x04050607, 0x10111213, 0x14151617 +.long 0x08090a0b, 0x0c0d0e0f, 0x18191a1b, 0x1c1d1e1f +SYM_DATA_END(RMASK) From ba8f8624fde2cbd92b995e6d92d4ecc97cf2a7f0 Mon Sep 17 00:00:00 2001 From: Danny Tsen Date: Wed, 26 Apr 2023 15:11:46 -0400 Subject: [PATCH 004/149] crypto: poly1305-p10 - Glue code for optmized Poly1305 implementation for ppc64le Signed-off-by: Danny Tsen Signed-off-by: Herbert Xu --- arch/powerpc/crypto/poly1305-p10-glue.c | 186 ++++++++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 arch/powerpc/crypto/poly1305-p10-glue.c diff --git a/arch/powerpc/crypto/poly1305-p10-glue.c b/arch/powerpc/crypto/poly1305-p10-glue.c new file mode 100644 index 000000000000..95dd708573ee --- /dev/null +++ b/arch/powerpc/crypto/poly1305-p10-glue.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Poly1305 authenticator algorithm, RFC7539. + * + * Copyright 2023- IBM Corp. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen); +asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit); +asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst); + +static void vsx_begin(void) +{ + preempt_disable(); + enable_kernel_vsx(); +} + +static void vsx_end(void) +{ + disable_kernel_vsx(); + preempt_enable(); +} + +static int crypto_poly1305_p10_init(struct shash_desc *desc) +{ + struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); + + poly1305_core_init(&dctx->h); + dctx->buflen = 0; + dctx->rset = 0; + dctx->sset = false; + + return 0; +} + +static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx, + const u8 *inp, unsigned int len) +{ + unsigned int acc = 0; + + if (unlikely(!dctx->sset)) { + if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) { + struct poly1305_core_key *key = &dctx->core_r; + + key->key.r64[0] = get_unaligned_le64(&inp[0]); + key->key.r64[1] = get_unaligned_le64(&inp[8]); + inp += POLY1305_BLOCK_SIZE; + len -= POLY1305_BLOCK_SIZE; + acc += POLY1305_BLOCK_SIZE; + dctx->rset = 1; + } + if (len >= POLY1305_BLOCK_SIZE) { + dctx->s[0] = get_unaligned_le32(&inp[0]); + dctx->s[1] = get_unaligned_le32(&inp[4]); + dctx->s[2] = get_unaligned_le32(&inp[8]); + dctx->s[3] = get_unaligned_le32(&inp[12]); + acc += POLY1305_BLOCK_SIZE; + dctx->sset = true; + } + } + return acc; +} + +static int crypto_poly1305_p10_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); + unsigned int bytes, used; + + if (unlikely(dctx->buflen)) { + bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); + memcpy(dctx->buf + dctx->buflen, src, bytes); + src += bytes; + srclen -= bytes; + dctx->buflen += bytes; + + if (dctx->buflen == POLY1305_BLOCK_SIZE) { + if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, + POLY1305_BLOCK_SIZE))) { + vsx_begin(); + poly1305_64s(&dctx->h, 
dctx->buf, + POLY1305_BLOCK_SIZE, 1); + vsx_end(); + } + dctx->buflen = 0; + } + } + + if (likely(srclen >= POLY1305_BLOCK_SIZE)) { + bytes = round_down(srclen, POLY1305_BLOCK_SIZE); + used = crypto_poly1305_setdctxkey(dctx, src, bytes); + if (likely(used)) { + srclen -= used; + src += used; + } + if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) { + vsx_begin(); + poly1305_p10le_4blocks(&dctx->h, src, srclen); + vsx_end(); + src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4)); + srclen %= POLY1305_BLOCK_SIZE * 4; + } + while (srclen >= POLY1305_BLOCK_SIZE) { + vsx_begin(); + poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1); + vsx_end(); + srclen -= POLY1305_BLOCK_SIZE; + src += POLY1305_BLOCK_SIZE; + } + } + + if (unlikely(srclen)) { + dctx->buflen = srclen; + memcpy(dctx->buf, src, srclen); + } + + return 0; +} + +static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst) +{ + struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); + + if (unlikely(!dctx->sset)) + return -ENOKEY; + + if ((dctx->buflen)) { + dctx->buf[dctx->buflen++] = 1; + memset(dctx->buf + dctx->buflen, 0, + POLY1305_BLOCK_SIZE - dctx->buflen); + vsx_begin(); + poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); + vsx_end(); + dctx->buflen = 0; + } + + poly1305_emit_64(&dctx->h, &dctx->s, dst); + return 0; +} + +static struct shash_alg poly1305_alg = { + .digestsize = POLY1305_DIGEST_SIZE, + .init = crypto_poly1305_p10_init, + .update = crypto_poly1305_p10_update, + .final = crypto_poly1305_p10_final, + .descsize = sizeof(struct poly1305_desc_ctx), + .base = { + .cra_name = "poly1305", + .cra_driver_name = "poly1305-p10", + .cra_priority = 300, + .cra_blocksize = POLY1305_BLOCK_SIZE, + .cra_module = THIS_MODULE, + }, +}; + +static int __init poly1305_p10_init(void) +{ + return crypto_register_shash(&poly1305_alg); +} + +static void __exit poly1305_p10_exit(void) +{ + crypto_unregister_shash(&poly1305_alg); +} + +module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init); +module_exit(poly1305_p10_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Danny Tsen "); +MODULE_DESCRIPTION("Optimized Poly1305 for P10"); +MODULE_ALIAS_CRYPTO("poly1305"); +MODULE_ALIAS_CRYPTO("poly1305-p10"); From 161fca7e3e905d5e99dbd79ed55d9212e6d80eb0 Mon Sep 17 00:00:00 2001 From: Danny Tsen Date: Wed, 26 Apr 2023 15:11:47 -0400 Subject: [PATCH 005/149] crypto: powerpc - Add chacha20/poly1305-p10 to Kconfig and Makefile Defined CRYPTO_CHACHA20_P10 and CRYPTO POLY1305_P10 in Kconfig to support optimized implementation for Power10 and later CPU. Added new module driver chacha-p10-crypto and poly1305-p10-crypto. Signed-off-by: Danny Tsen Signed-off-by: Herbert Xu --- arch/powerpc/crypto/Kconfig | 26 ++++++++++++++++++++++++++ arch/powerpc/crypto/Makefile | 4 ++++ 2 files changed, 30 insertions(+) diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index ad1872518992..f25024afdda5 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -111,4 +111,30 @@ config CRYPTO_AES_GCM_P10 Support for cryptographic acceleration instructions on Power10 or later CPU. This module supports stitched acceleration for AES/GCM. 
+config CRYPTO_CHACHA20_P10 + tristate "Ciphers: ChaCha20, XChacha20, XChacha12 (P10 or later)" + depends on PPC64 && CPU_LITTLE_ENDIAN + select CRYPTO_SKCIPHER + select CRYPTO_LIB_CHACHA_GENERIC + select CRYPTO_ARCH_HAVE_LIB_CHACHA + help + Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 + stream cipher algorithms + + Architecture: PowerPC64 + - Power10 or later + - Little-endian + +config CRYPTO_POLY1305_P10 + tristate "Hash functions: Poly1305 (P10 or later)" + depends on PPC64 && CPU_LITTLE_ENDIAN + select CRYPTO_HASH + select CRYPTO_LIB_POLY1305_GENERIC + help + Poly1305 authenticator algorithm (RFC7539) + + Architecture: PowerPC64 + - Power10 or later + - Little-endian + endmenu diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 7b4f516abec1..ebdac1b9eb9a 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -14,6 +14,8 @@ obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o +obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o +obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o md5-ppc-y := md5-asm.o md5-glue.o @@ -23,6 +25,8 @@ sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o +chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o +poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o quiet_cmd_perl = PERL $@ cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@ From 31ba6dd2298fd9c49ecf66200103b1e88055f7a0 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Mon, 19 Jun 2023 21:24:24 +0800 Subject: [PATCH 006/149] KEYS: fix kernel-doc warnings in verify_pefile Fix kernel-doc warnings in verify_pefile: crypto/asymmetric_keys/verify_pefile.c:423: warning: Excess function parameter 'trust_keys' description in 'verify_pefile_signature' crypto/asymmetric_keys/verify_pefile.c:423: warning: Function parameter or member 'trusted_keys' not described in 'verify_pefile_signature' Signed-off-by: Gaosheng Cui Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/verify_pefile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index 22beaf2213a2..f440767bd727 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c @@ -391,7 +391,7 @@ error_no_desc: * verify_pefile_signature - Verify the signature on a PE binary image * @pebuf: Buffer containing the PE binary image * @pelen: Length of the binary image - * @trust_keys: Signing certificate(s) to use as starting points + * @trusted_keys: Signing certificate(s) to use as starting points * @usage: The use to which the key is being put. * * Validate that the certificate chain inside the PKCS#7 message inside the PE From babb80b3ecc6f40c962e13c654ebcd27f25ee327 Mon Sep 17 00:00:00 2001 From: Azeem Shaikh Date: Tue, 20 Jun 2023 20:08:32 +0000 Subject: [PATCH 007/149] crypto: lrw,xts - Replace strlcpy with strscpy strlcpy() reads the entire source buffer first. This read may exceed the destination size limit. 
This is both inefficient and can lead to linear read overflows if a source string is not NUL-terminated [1]. In an effort to remove strlcpy() completely [2], replace strlcpy() here with strscpy(). Direct replacement is safe here since return value of -errno is used to check for truncation instead of sizeof(dest). [1] https://www.kernel.org/doc/html/latest/process/deprecated.html#strlcpy [2] https://github.com/KSPP/linux/issues/89 Signed-off-by: Azeem Shaikh Reviewed-by: Kees Cook Signed-off-by: Herbert Xu --- crypto/lrw.c | 6 +++--- crypto/xts.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crypto/lrw.c b/crypto/lrw.c index 1b0f76ba3eb5..59260aefed28 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -357,10 +357,10 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) * cipher name. */ if (!strncmp(cipher_name, "ecb(", 4)) { - unsigned len; + int len; - len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); - if (len < 2 || len >= sizeof(ecb_name)) + len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); + if (len < 2) goto err_free_inst; if (ecb_name[len - 1] != ')') diff --git a/crypto/xts.c b/crypto/xts.c index 09be909a6a1a..548b302c6c6a 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -396,10 +396,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb) * cipher name. */ if (!strncmp(cipher_name, "ecb(", 4)) { - unsigned len; + int len; - len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); - if (len < 2 || len >= sizeof(ctx->name)) + len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); + if (len < 2) goto err_free_inst; if (ctx->name[len - 1] != ')') From 74c6df413f64f349e8ae4166d97324803bf55b58 Mon Sep 17 00:00:00 2001 From: "Chang S. Bae" Date: Wed, 21 Jun 2023 05:06:53 -0700 Subject: [PATCH 008/149] crypto: x86/aesni - Align the address before aes_set_key_common() aes_set_key_common() performs runtime alignment to the void *raw_ctx pointer. This facilitates consistent access to the 16byte-aligned address during key extension. However, the alignment is already handlded in the GCM-related setkey functions before invoking the common function. Consequently, the alignment in the common function is unnecessary for those functions. To establish a consistent approach throughout the glue code, remove the aes_ctx() call from its current location. Instead, place it at each call site where the runtime alignment is currently absent. Link: https://lore.kernel.org/lkml/20230605024623.GA4653@quark.localdomain/ Suggested-by: Eric Biggers Signed-off-by: Chang S. 
Bae Cc: linux-crypto@vger.kernel.org Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Herbert Xu --- arch/x86/crypto/aesni-intel_glue.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index a5b0cb3efeba..c4eea7e746e7 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -229,10 +229,10 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) return (struct crypto_aes_ctx *)ALIGN(addr, align); } -static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, +static int aes_set_key_common(struct crypto_tfm *tfm, + struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) { - struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx); int err; if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 && @@ -253,7 +253,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx, static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { - return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len); + return aes_set_key_common(tfm, aes_ctx(crypto_tfm_ctx(tfm)), in_key, key_len); } static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -286,7 +286,7 @@ static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int len) { return aes_set_key_common(crypto_skcipher_tfm(tfm), - crypto_skcipher_ctx(tfm), key, len); + aes_ctx(crypto_skcipher_ctx(tfm)), key, len); } static int ecb_encrypt(struct skcipher_request *req) @@ -893,13 +893,13 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, keylen /= 2; /* first half of xts-key is for crypt */ - err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx, + err = aes_set_key_common(crypto_skcipher_tfm(tfm), aes_ctx(ctx->raw_crypt_ctx), key, keylen); if (err) return err; /* second half of xts-key is for tweak */ - return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx, + return aes_set_key_common(crypto_skcipher_tfm(tfm), aes_ctx(ctx->raw_tweak_ctx), key + keylen, keylen); } From 0f942bdfe9d463be3073301519492f8d53c6b2d5 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Thu, 22 Jun 2023 10:26:35 +0100 Subject: [PATCH 009/149] crypto: qat - change value of default idle filter The power management configuration of 4xxx devices is too aggressive and in some conditions the device might be prematurely put to a low power state. Increase the idle filter value to prevent that. In future, this will be set by firmware. 
Fixes: e5745f34113b ("crypto: qat - enable power management for QAT GEN4") Signed-off-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h index dd112923e006..c2768762cca3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h @@ -35,7 +35,7 @@ #define ADF_GEN4_PM_MSG_PENDING BIT(0) #define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK GENMASK(28, 1) -#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0) +#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x6) #define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7) #define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1) From b582763721828f6c3fcf94d371623127f0198f3f Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:51 -0500 Subject: [PATCH 010/149] crypto: ccp - Rename macro for security attributes The attribute_show() macro is only valid for determining the availability of security related sysfs entries. Rename the macro to better show this relationship. Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-pci.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index b603ad9b8341..c8f075a7f49f 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -32,7 +32,7 @@ struct sp_pci { }; static struct sp_device *sp_dev_master; -#define attribute_show(name, def) \ +#define security_attribute_show(name, def) \ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ char *buf) \ { \ @@ -42,24 +42,24 @@ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \ } -attribute_show(fused_part, FUSED_PART) +security_attribute_show(fused_part, FUSED_PART) static DEVICE_ATTR_RO(fused_part); -attribute_show(debug_lock_on, DEBUG_LOCK_ON) +security_attribute_show(debug_lock_on, DEBUG_LOCK_ON) static DEVICE_ATTR_RO(debug_lock_on); -attribute_show(tsme_status, TSME_STATUS) +security_attribute_show(tsme_status, TSME_STATUS) static DEVICE_ATTR_RO(tsme_status); -attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS) +security_attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS) static DEVICE_ATTR_RO(anti_rollback_status); -attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED) +security_attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED) static DEVICE_ATTR_RO(rpmc_production_enabled); -attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE) +security_attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE) static DEVICE_ATTR_RO(rpmc_spirom_available); -attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE) +security_attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE) static DEVICE_ATTR_RO(hsp_tpm_available); -attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED) +security_attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED) static DEVICE_ATTR_RO(rom_armor_enforced); -static struct attribute *psp_attrs[] = { +static struct attribute *psp_security_attrs[] = { &dev_attr_fused_part.attr, &dev_attr_debug_lock_on.attr, &dev_attr_tsme_status.attr, @@ -83,13 +83,13 @@ static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *a return 0; } -static 
struct attribute_group psp_attr_group = { - .attrs = psp_attrs, +static struct attribute_group psp_security_attr_group = { + .attrs = psp_security_attrs, .is_visible = psp_security_is_visible, }; static const struct attribute_group *psp_groups[] = { - &psp_attr_group, + &psp_security_attr_group, NULL, }; From 2e424c33d8e748b65b683988f80e711cd6a7f619 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:52 -0500 Subject: [PATCH 011/149] crypto: ccp - Add support for displaying PSP firmware versions As it's not always obvious what PSP bootloader or TEE version are present in OEM systems, add the ability to get this information from sysfs for supported platforms. Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- Documentation/ABI/testing/sysfs-driver-ccp | 18 ++++++ drivers/crypto/ccp/sp-dev.h | 2 + drivers/crypto/ccp/sp-pci.c | 64 ++++++++++++++++++++++ 3 files changed, 84 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-driver-ccp b/Documentation/ABI/testing/sysfs-driver-ccp index 7aded9b75553..ee6b787eee7a 100644 --- a/Documentation/ABI/testing/sysfs-driver-ccp +++ b/Documentation/ABI/testing/sysfs-driver-ccp @@ -85,3 +85,21 @@ Description: Possible values: 0: Not enforced 1: Enforced + +What: /sys/bus/pci/devices//bootloader_version +Date: June 2023 +KernelVersion: 6.4 +Contact: mario.limonciello@amd.com +Description: + The /sys/bus/pci/devices//bootloader_version + file reports the firmware version of the AMD AGESA + bootloader. + +What: /sys/bus/pci/devices//tee_version +Date: June 2023 +KernelVersion: 6.4 +Contact: mario.limonciello@amd.com +Description: + The /sys/bus/pci/devices//tee_version + file reports the firmware version of the AMD Trusted + Execution Environment (TEE). 
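As an illustration only (not part of the patch), these attributes behave like any other read-only sysfs file from user space; the PCI address used below is a made-up placeholder, the real one comes from the device path documented above:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical PCI function; substitute the real PSP device */
            const char *path =
                    "/sys/bus/pci/devices/0000:03:00.2/bootloader_version";
            char version[32] = "";
            FILE *f = fopen(path, "r");

            if (!f)
                    return 1;
            if (fgets(version, sizeof(version), f))
                    printf("PSP bootloader: %s", version); /* AA.BB.CC.DD */
            fclose(f);
            return 0;
    }
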
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 1253a0217985..76c32ee6bd65 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -51,6 +51,7 @@ struct tee_vdata { const unsigned int cmdbuff_addr_hi_reg; const unsigned int ring_wptr_reg; const unsigned int ring_rptr_reg; + const unsigned int info_reg; }; struct platform_access_vdata { @@ -69,6 +70,7 @@ struct psp_vdata { const unsigned int feature_reg; const unsigned int inten_reg; const unsigned int intsts_reg; + const unsigned int bootloader_info_reg; }; /* Structure to hold SP device data */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index c8f075a7f49f..6c93577950c7 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -8,6 +8,7 @@ * Author: Gary R Hook */ +#include #include #include #include @@ -24,6 +25,12 @@ #include "ccp-dev.h" #include "psp-dev.h" +/* used for version string AA.BB.CC.DD */ +#define AA GENMASK(31, 24) +#define BB GENMASK(23, 16) +#define CC GENMASK(15, 8) +#define DD GENMASK(7, 0) + #define MSIX_VECTORS 2 struct sp_pci { @@ -88,8 +95,65 @@ static struct attribute_group psp_security_attr_group = { .is_visible = psp_security_is_visible, }; +#define version_attribute_show(name, _offset) \ +static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct sp_device *sp = dev_get_drvdata(d); \ + struct psp_device *psp = sp->psp_data; \ + unsigned int val = ioread32(psp->io_regs + _offset); \ + return sysfs_emit(buf, "%02lx.%02lx.%02lx.%02lx\n", \ + FIELD_GET(AA, val), \ + FIELD_GET(BB, val), \ + FIELD_GET(CC, val), \ + FIELD_GET(DD, val)); \ +} + +version_attribute_show(bootloader_version, psp->vdata->bootloader_info_reg) +static DEVICE_ATTR_RO(bootloader_version); +version_attribute_show(tee_version, psp->vdata->tee->info_reg) +static DEVICE_ATTR_RO(tee_version); + +static struct attribute *psp_firmware_attrs[] = { + &dev_attr_bootloader_version.attr, + &dev_attr_tee_version.attr, + NULL, +}; + +static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct sp_device *sp = dev_get_drvdata(dev); + struct psp_device *psp = sp->psp_data; + unsigned int val = 0xffffffff; + + if (!psp) + return 0; + + if (attr == &dev_attr_bootloader_version.attr && + psp->vdata->bootloader_info_reg) + val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg); + + if (attr == &dev_attr_tee_version.attr && + psp->capability & PSP_CAPABILITY_TEE && + psp->vdata->tee->info_reg) + val = ioread32(psp->io_regs + psp->vdata->tee->info_reg); + + /* If platform disallows accessing this register it will be all f's */ + if (val != 0xffffffff) + return 0444; + + return 0; +} + +static struct attribute_group psp_firmware_attr_group = { + .attrs = psp_firmware_attrs, + .is_visible = psp_firmware_is_visible, +}; + static const struct attribute_group *psp_groups[] = { &psp_security_attr_group, + &psp_firmware_attr_group, NULL, }; From e938b08ad8cd7b757b7b37e8fb1c20897dd3ec09 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:53 -0500 Subject: [PATCH 012/149] crypto: ccp - Add bootloader and TEE version offsets The bootloader and TEE versions are stored in registers that can be accessed from sysfs. This exports the information for recent client and datacenter parts. 
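For reference, and again outside the patch itself, the AA.BB.CC.DD decoding that version_attribute_show() performs with FIELD_GET() boils down to plain byte extraction; the raw value below is invented purely for illustration and is not a real register read:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical contents of a C2PMSG info register */
            uint32_t val = 0x00231405;

            /* AA = bits 31:24, BB = 23:16, CC = 15:8, DD = 7:0 */
            printf("%02x.%02x.%02x.%02x\n",
                   (val >> 24) & 0xff, (val >> 16) & 0xff,
                   (val >> 8) & 0xff, val & 0xff);
            return 0;
    }
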
Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-pci.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 6c93577950c7..205b93d229a9 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -423,6 +423,7 @@ static const struct tee_vdata teev1 = { .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */ .ring_wptr_reg = 0x10550, /* C2PMSG_20 */ .ring_rptr_reg = 0x10554, /* C2PMSG_21 */ + .info_reg = 0x109e8, /* C2PMSG_58 */ }; static const struct tee_vdata teev2 = { @@ -448,6 +449,7 @@ static const struct platform_access_vdata pa_v2 = { static const struct psp_vdata pspv1 = { .sev = &sevv1, + .bootloader_info_reg = 0x105ec, /* C2PMSG_59 */ .feature_reg = 0x105fc, /* C2PMSG_63 */ .inten_reg = 0x10610, /* P2CMSG_INTEN */ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ @@ -455,6 +457,7 @@ static const struct psp_vdata pspv1 = { static const struct psp_vdata pspv2 = { .sev = &sevv2, + .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ @@ -463,6 +466,7 @@ static const struct psp_vdata pspv2 = { static const struct psp_vdata pspv3 = { .tee = &teev1, .platform_access = &pa_v1, + .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ @@ -471,6 +475,7 @@ static const struct psp_vdata pspv3 = { static const struct psp_vdata pspv4 = { .sev = &sevv2, .tee = &teev1, + .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ From b8440d55f7d4ad2b669902301c87c482faf9a8f4 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:54 -0500 Subject: [PATCH 013/149] crypto: ccp - move setting PSP master to earlier in the init Dynamic boost control needs to use platform access symbols that look for the PSP master as part of initialization. So move the PSP master before psp_init() so that dynamic boost control can be initialized properly. 
Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- drivers/crypto/ccp/psp-dev.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index e3d6955d3265..3390f0bd6408 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -173,13 +173,14 @@ int psp_dev_init(struct sp_device *sp) goto e_err; } + /* master device must be set for platform access */ + if (psp->sp->set_psp_master_device) + psp->sp->set_psp_master_device(psp->sp); + ret = psp_init(psp); if (ret) goto e_irq; - if (sp->set_psp_master_device) - sp->set_psp_master_device(sp); - /* Enable interrupt */ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg); @@ -188,6 +189,9 @@ int psp_dev_init(struct sp_device *sp) return 0; e_irq: + if (sp->clear_psp_master_device) + sp->clear_psp_master_device(sp); + sp_free_psp_irq(psp->sp, psp); e_err: sp->psp_data = NULL; From c04cf9e14f109ebcc425c1efd2c01294c52a4d62 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:55 -0500 Subject: [PATCH 014/149] crypto: ccp - Add support for fetching a nonce for dynamic boost control Dynamic Boost Control is a feature offered on AMD client platforms that allows software to request and set power or frequency limits. Only software that has authenticated with the PSP can retrieve or set these limits. Create a character device and ioctl for fetching the nonce. This ioctl supports optionally passing authentication information which will influence how many calls the nonce is valid for. Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/dbc.c | 191 ++++++++++++++++++++++++++++ drivers/crypto/ccp/dbc.h | 44 +++++++ drivers/crypto/ccp/psp-dev.c | 9 ++ drivers/crypto/ccp/psp-dev.h | 1 + drivers/crypto/ccp/sp-dev.h | 5 + drivers/crypto/ccp/sp-pci.c | 1 + include/linux/psp-platform-access.h | 1 + include/uapi/linux/psp-dbc.h | 67 ++++++++++ 9 files changed, 321 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/dbc.c create mode 100644 drivers/crypto/ccp/dbc.h create mode 100644 include/uapi/linux/psp-dbc.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index f6196495e862..aa0ba2d17e1e 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -11,7 +11,8 @@ ccp-$(CONFIG_PCI) += sp-pci.o ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sev-dev.o \ tee-dev.o \ - platform-access.o + platform-access.o \ + dbc.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c new file mode 100644 index 000000000000..f65e93a81e53 --- /dev/null +++ b/drivers/crypto/ccp/dbc.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Secure Processor Dynamic Boost Control interface + * + * Copyright (C) 2023 Advanced Micro Devices, Inc. 
+ * + * Author: Mario Limonciello + */ + +#include "dbc.h" + +struct error_map { + u32 psp; + int ret; +}; + +#define DBC_ERROR_ACCESS_DENIED 0x0001 +#define DBC_ERROR_EXCESS_DATA 0x0004 +#define DBC_ERROR_BAD_PARAMETERS 0x0006 +#define DBC_ERROR_BAD_STATE 0x0007 +#define DBC_ERROR_NOT_IMPLEMENTED 0x0009 +#define DBC_ERROR_BUSY 0x000D +#define DBC_ERROR_MESSAGE_FAILURE 0x0307 +#define DBC_ERROR_OVERFLOW 0x300F +#define DBC_ERROR_SIGNATURE_INVALID 0x3072 + +static struct error_map error_codes[] = { + {DBC_ERROR_ACCESS_DENIED, -EACCES}, + {DBC_ERROR_EXCESS_DATA, -E2BIG}, + {DBC_ERROR_BAD_PARAMETERS, -EINVAL}, + {DBC_ERROR_BAD_STATE, -EAGAIN}, + {DBC_ERROR_MESSAGE_FAILURE, -ENOENT}, + {DBC_ERROR_NOT_IMPLEMENTED, -ENOENT}, + {DBC_ERROR_BUSY, -EBUSY}, + {DBC_ERROR_OVERFLOW, -ENFILE}, + {DBC_ERROR_SIGNATURE_INVALID, -EPERM}, + {0x0, 0x0}, +}; + +static int send_dbc_cmd(struct psp_dbc_device *dbc_dev, + enum psp_platform_access_msg msg) +{ + int ret; + + dbc_dev->mbox->req.header.status = 0; + ret = psp_send_platform_access_msg(msg, (struct psp_request *)dbc_dev->mbox); + if (ret == -EIO) { + int i; + + dev_dbg(dbc_dev->dev, + "msg 0x%x failed with PSP error: 0x%x\n", + msg, dbc_dev->mbox->req.header.status); + + for (i = 0; error_codes[i].psp; i++) { + if (dbc_dev->mbox->req.header.status == error_codes[i].psp) + return error_codes[i].ret; + } + } + + return ret; +} + +static int send_dbc_nonce(struct psp_dbc_device *dbc_dev) +{ + int ret; + + dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_nonce); + ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE); + if (ret == -EAGAIN) { + dev_dbg(dbc_dev->dev, "retrying get nonce\n"); + ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE); + } + + return ret; +} + +void dbc_dev_destroy(struct psp_device *psp) +{ + struct psp_dbc_device *dbc_dev = psp->dbc_data; + + if (!dbc_dev) + return; + + misc_deregister(&dbc_dev->char_dev); + mutex_destroy(&dbc_dev->ioctl_mutex); + psp->dbc_data = NULL; +} + +static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct psp_device *psp_master = psp_get_master_device(); + void __user *argp = (void __user *)arg; + struct psp_dbc_device *dbc_dev; + int ret; + + if (!psp_master || !psp_master->dbc_data) + return -ENODEV; + dbc_dev = psp_master->dbc_data; + + mutex_lock(&dbc_dev->ioctl_mutex); + + switch (cmd) { + case DBCIOCNONCE: + if (copy_from_user(&dbc_dev->mbox->dbc_nonce.user, argp, + sizeof(struct dbc_user_nonce))) { + ret = -EFAULT; + goto unlock; + } + + ret = send_dbc_nonce(dbc_dev); + if (ret) + goto unlock; + + if (copy_to_user(argp, &dbc_dev->mbox->dbc_nonce.user, + sizeof(struct dbc_user_nonce))) { + ret = -EFAULT; + goto unlock; + } + break; + default: + ret = -EINVAL; + + } +unlock: + mutex_unlock(&dbc_dev->ioctl_mutex); + + return ret; +} + +static const struct file_operations dbc_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = dbc_ioctl, +}; + +int dbc_dev_init(struct psp_device *psp) +{ + struct device *dev = psp->dev; + struct psp_dbc_device *dbc_dev; + int ret; + + if (!PSP_FEATURE(psp, DBC)) + return 0; + + dbc_dev = devm_kzalloc(dev, sizeof(*dbc_dev), GFP_KERNEL); + if (!dbc_dev) + return -ENOMEM; + + BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE); + dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0); + if (!dbc_dev->mbox) { + ret = -ENOMEM; + goto cleanup_dev; + } + + psp->dbc_data = dbc_dev; + dbc_dev->dev = dev; + + ret = send_dbc_nonce(dbc_dev); + if (ret == -EACCES) { + dev_dbg(dbc_dev->dev, + "dynamic boost control 
was previously authenticated\n"); + ret = 0; + } + dev_dbg(dbc_dev->dev, "dynamic boost control is %savailable\n", + ret ? "un" : ""); + if (ret) { + ret = 0; + goto cleanup_mbox; + } + + dbc_dev->char_dev.minor = MISC_DYNAMIC_MINOR; + dbc_dev->char_dev.name = "dbc"; + dbc_dev->char_dev.fops = &dbc_fops; + dbc_dev->char_dev.mode = 0600; + ret = misc_register(&dbc_dev->char_dev); + if (ret) + goto cleanup_mbox; + + mutex_init(&dbc_dev->ioctl_mutex); + + return 0; + +cleanup_mbox: + devm_free_pages(dev, (unsigned long)dbc_dev->mbox); + +cleanup_dev: + psp->dbc_data = NULL; + devm_kfree(dev, dbc_dev); + + return ret; +} diff --git a/drivers/crypto/ccp/dbc.h b/drivers/crypto/ccp/dbc.h new file mode 100644 index 000000000000..1c3a0a078d15 --- /dev/null +++ b/drivers/crypto/ccp/dbc.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * AMD Platform Security Processor (PSP) Dynamic Boost Control support + * + * Copyright (C) 2023 Advanced Micro Devices, Inc. + * + * Author: Mario Limonciello + */ + +#ifndef __DBC_H__ +#define __DBC_H__ + +#include + +#include +#include +#include + +#include "psp-dev.h" + +struct psp_dbc_device { + struct device *dev; + + union dbc_buffer *mbox; + + struct mutex ioctl_mutex; + + struct miscdevice char_dev; +}; + +struct dbc_nonce { + struct psp_req_buffer_hdr header; + struct dbc_user_nonce user; +} __packed; + +union dbc_buffer { + struct psp_request req; + struct dbc_nonce dbc_nonce; +}; + +void dbc_dev_destroy(struct psp_device *psp); +int dbc_dev_init(struct psp_device *psp); + +#endif /* __DBC_H */ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 3390f0bd6408..d42d7bc62352 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -15,6 +15,7 @@ #include "sev-dev.h" #include "tee-dev.h" #include "platform-access.h" +#include "dbc.h" struct psp_device *psp_master; @@ -112,6 +113,12 @@ static void psp_init_platform_access(struct psp_device *psp) dev_warn(psp->dev, "platform access init failed: %d\n", ret); return; } + + /* dbc must come after platform access as it tests the feature */ + ret = dbc_dev_init(psp); + if (ret) + dev_warn(psp->dev, "failed to init dynamic boost control: %d\n", + ret); } static int psp_init(struct psp_device *psp) @@ -217,6 +224,8 @@ void psp_dev_destroy(struct sp_device *sp) tee_dev_destroy(psp); + dbc_dev_destroy(psp); + platform_access_dev_destroy(psp); sp_free_psp_irq(sp, psp); diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 505e4bdeaca8..8a4de69399c5 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -40,6 +40,7 @@ struct psp_device { void *sev_data; void *tee_data; void *platform_access_data; + void *dbc_data; unsigned int capability; }; diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 76c32ee6bd65..2329ad524b49 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -28,6 +28,10 @@ #define CACHE_NONE 0x00 #define CACHE_WB_NO_ALLOC 0xb7 +#define PLATFORM_FEATURE_DBC 0x1 + +#define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat) + /* Structure to hold CCP device data */ struct ccp_device; struct ccp_vdata { @@ -71,6 +75,7 @@ struct psp_vdata { const unsigned int inten_reg; const unsigned int intsts_reg; const unsigned int bootloader_info_reg; + const unsigned int platform_features; }; /* Structure to hold SP device data */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 
205b93d229a9..b6ab56abeb68 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -470,6 +470,7 @@ static const struct psp_vdata pspv3 = { .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ + .platform_features = PLATFORM_FEATURE_DBC, }; static const struct psp_vdata pspv4 = { diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h index 75da8f5f7ad8..53b4a1df5180 100644 --- a/include/linux/psp-platform-access.h +++ b/include/linux/psp-platform-access.h @@ -8,6 +8,7 @@ enum psp_platform_access_msg { PSP_CMD_NONE = 0x0, PSP_I2C_REQ_BUS_CMD = 0x64, + PSP_DYNAMIC_BOOST_GET_NONCE, }; struct psp_req_buffer_hdr { diff --git a/include/uapi/linux/psp-dbc.h b/include/uapi/linux/psp-dbc.h new file mode 100644 index 000000000000..d032f78934e2 --- /dev/null +++ b/include/uapi/linux/psp-dbc.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Userspace interface for AMD Dynamic Boost Control (DBC) + * + * Copyright (C) 2023 Advanced Micro Devices, Inc. + * + * Author: Mario Limonciello + */ + +#ifndef __PSP_DBC_USER_H__ +#define __PSP_DBC_USER_H__ + +#include + +/** + * DOC: AMD Dynamic Boost Control (DBC) interface + */ + +#define DBC_NONCE_SIZE 16 +#define DBC_SIG_SIZE 32 + +/** + * struct dbc_user_nonce - Nonce exchange structure (input/output). + * @auth_needed: Whether the PSP should authenticate this request (input). + * 0: no authentication, PSP will return single use nonce. + * 1: authentication: PSP will return multi-use nonce. + * @nonce: 8 byte value used for future authentication (output). + * @signature: Optional 32 byte signature created by software using a + * previous nonce (input). + */ +struct dbc_user_nonce { + __u32 auth_needed; + __u8 nonce[DBC_NONCE_SIZE]; + __u8 signature[DBC_SIG_SIZE]; +} __packed; + +/** + * Dynamic Boost Control (DBC) IOC + * + * possible return codes for all DBC IOCTLs: + * 0: success + * -EINVAL: invalid input + * -E2BIG: excess data passed + * -EFAULT: failed to copy to/from userspace + * -EBUSY: mailbox in recovery or in use + * -ENODEV: driver not bound with PSP device + * -EACCES: request isn't authorized + * -EINVAL: invalid parameter + * -ETIMEDOUT: request timed out + * -EAGAIN: invalid request for state machine + * -ENOENT: not implemented + * -ENFILE: overflow + * -EPERM: invalid signature + * -EIO: unknown error + */ +#define DBC_IOC_TYPE 'D' + +/** + * DBCIOCNONCE - Fetch a nonce from the PSP for authenticating commands. + * If a nonce is fetched without authentication it can only + * be utilized for one command. + * If a nonce is fetched with authentication it can be used + * for multiple requests. + */ +#define DBCIOCNONCE _IOWR(DBC_IOC_TYPE, 0x1, struct dbc_user_nonce) + +#endif /* __PSP_DBC_USER_H__ */ From d9408716d2126439fbc46f6c40e72792069b8411 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:56 -0500 Subject: [PATCH 015/149] crypto: ccp - Add support for setting user ID for dynamic boost control As part of the authentication flow for Dynamic Boost Control, the calling software will need to send a uid used in all of its future communications. Add support for another IOCTL call to let userspace software set this up. 
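For orientation, a minimal user-space sketch of the nonce/UID flow added by this and the previous patch might look as follows; it assumes the uapi header is installed as <linux/psp-dbc.h>, and sign_nonce() is a purely hypothetical stand-in, since deriving the 32-byte signature from the caller's key material is outside the scope of this interface:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/psp-dbc.h>

    static void sign_nonce(const __u8 *nonce, __u8 *sig_out)
    {
            /* Placeholder: a real caller derives the 32-byte signature from
             * the nonce with its own key material; an all-zero signature is
             * expected to be rejected on secured parts. */
            (void)nonce;
            memset(sig_out, 0, DBC_SIG_SIZE);
    }

    int dbc_set_identity(const __u8 uid[DBC_UID_SIZE])
    {
            struct dbc_user_nonce nonce = { .auth_needed = 0 };
            struct dbc_user_setuid suid = {};
            __u8 sig[DBC_SIG_SIZE];
            int fd, ret;

            fd = open("/dev/dbc", O_RDWR);
            if (fd < 0)
                    return -1;

            /* fetch a single-use nonce; no signature needed for this step */
            ret = ioctl(fd, DBCIOCNONCE, &nonce);
            if (ret)
                    goto out;

            sign_nonce(nonce.nonce, sig);

            /* bind the caller's UID; allowed only once until reboot */
            memcpy(suid.uid, uid, DBC_UID_SIZE);
            memcpy(suid.signature, sig, DBC_SIG_SIZE);
            ret = ioctl(fd, DBCIOCUID, &suid);
    out:
            close(fd);
            return ret;
    }
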
Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- drivers/crypto/ccp/dbc.c | 18 ++++++++++++++++++ drivers/crypto/ccp/dbc.h | 6 ++++++ include/linux/psp-platform-access.h | 1 + include/uapi/linux/psp-dbc.h | 20 ++++++++++++++++++++ 4 files changed, 45 insertions(+) diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c index f65e93a81e53..c6f5fb3658ca 100644 --- a/drivers/crypto/ccp/dbc.c +++ b/drivers/crypto/ccp/dbc.c @@ -117,6 +117,24 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) goto unlock; } break; + case DBCIOCUID: + dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_set_uid); + if (copy_from_user(&dbc_dev->mbox->dbc_set_uid.user, argp, + sizeof(struct dbc_user_setuid))) { + ret = -EFAULT; + goto unlock; + } + + ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID); + if (ret) + goto unlock; + + if (copy_to_user(argp, &dbc_dev->mbox->dbc_set_uid.user, + sizeof(struct dbc_user_setuid))) { + ret = -EFAULT; + goto unlock; + } + break; default: ret = -EINVAL; diff --git a/drivers/crypto/ccp/dbc.h b/drivers/crypto/ccp/dbc.h index 1c3a0a078d15..156435100076 100644 --- a/drivers/crypto/ccp/dbc.h +++ b/drivers/crypto/ccp/dbc.h @@ -33,9 +33,15 @@ struct dbc_nonce { struct dbc_user_nonce user; } __packed; +struct dbc_set_uid { + struct psp_req_buffer_hdr header; + struct dbc_user_setuid user; +} __packed; + union dbc_buffer { struct psp_request req; struct dbc_nonce dbc_nonce; + struct dbc_set_uid dbc_set_uid; }; void dbc_dev_destroy(struct psp_device *psp); diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h index 53b4a1df5180..18b9e0f0cb03 100644 --- a/include/linux/psp-platform-access.h +++ b/include/linux/psp-platform-access.h @@ -9,6 +9,7 @@ enum psp_platform_access_msg { PSP_CMD_NONE = 0x0, PSP_I2C_REQ_BUS_CMD = 0x64, PSP_DYNAMIC_BOOST_GET_NONCE, + PSP_DYNAMIC_BOOST_SET_UID, }; struct psp_req_buffer_hdr { diff --git a/include/uapi/linux/psp-dbc.h b/include/uapi/linux/psp-dbc.h index d032f78934e2..7443c78ede19 100644 --- a/include/uapi/linux/psp-dbc.h +++ b/include/uapi/linux/psp-dbc.h @@ -18,6 +18,7 @@ #define DBC_NONCE_SIZE 16 #define DBC_SIG_SIZE 32 +#define DBC_UID_SIZE 16 /** * struct dbc_user_nonce - Nonce exchange structure (input/output). @@ -34,6 +35,16 @@ struct dbc_user_nonce { __u8 signature[DBC_SIG_SIZE]; } __packed; +/** + * struct dbc_user_setuid - UID exchange structure (input). + * @uid: 16 byte value representing software identity + * @signature: 32 byte signature created by software using a previous nonce + */ +struct dbc_user_setuid { + __u8 uid[DBC_UID_SIZE]; + __u8 signature[DBC_SIG_SIZE]; +} __packed; + /** * Dynamic Boost Control (DBC) IOC * @@ -64,4 +75,13 @@ struct dbc_user_nonce { */ #define DBCIOCNONCE _IOWR(DBC_IOC_TYPE, 0x1, struct dbc_user_nonce) +/** + * DBCIOCUID - Set the user ID (UID) of a calling process. + * The user ID is 8 bytes long. It must be programmed using a + * 32 byte signature built using the nonce fetched from + * DBCIOCNONCE. + * The UID can only be set once until the system is rebooted. 
+ */ +#define DBCIOCUID _IOW(DBC_IOC_TYPE, 0x2, struct dbc_user_setuid) + #endif /* __PSP_DBC_USER_H__ */ From e2cfe05e9277b5a7abbbc186fec1ad37348dd956 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:57 -0500 Subject: [PATCH 016/149] crypto: ccp - Add support for getting and setting DBC parameters After software has authenticated a dynamic boost control request, it can fetch and set supported parameters using a selection of messages. Add support for these messages and export the ability to do this to userspace. Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- drivers/crypto/ccp/dbc.c | 41 ++++++++++++++++++++ drivers/crypto/ccp/dbc.h | 6 +++ include/linux/psp-platform-access.h | 2 + include/uapi/linux/psp-dbc.h | 60 +++++++++++++++++++++++++++++ 4 files changed, 109 insertions(+) diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c index c6f5fb3658ca..839ea14b9a85 100644 --- a/drivers/crypto/ccp/dbc.c +++ b/drivers/crypto/ccp/dbc.c @@ -74,6 +74,30 @@ static int send_dbc_nonce(struct psp_dbc_device *dbc_dev) return ret; } +static int send_dbc_parameter(struct psp_dbc_device *dbc_dev) +{ + dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_param); + + switch (dbc_dev->mbox->dbc_param.user.msg_index) { + case PARAM_SET_FMAX_CAP: + case PARAM_SET_PWR_CAP: + case PARAM_SET_GFX_MODE: + return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_PARAMETER); + case PARAM_GET_FMAX_CAP: + case PARAM_GET_PWR_CAP: + case PARAM_GET_CURR_TEMP: + case PARAM_GET_FMAX_MAX: + case PARAM_GET_FMAX_MIN: + case PARAM_GET_SOC_PWR_MAX: + case PARAM_GET_SOC_PWR_MIN: + case PARAM_GET_SOC_PWR_CUR: + case PARAM_GET_GFX_MODE: + return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_PARAMETER); + } + + return -EINVAL; +} + void dbc_dev_destroy(struct psp_device *psp) { struct psp_dbc_device *dbc_dev = psp->dbc_data; @@ -135,6 +159,23 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) goto unlock; } break; + case DBCIOCPARAM: + if (copy_from_user(&dbc_dev->mbox->dbc_param.user, argp, + sizeof(struct dbc_user_param))) { + ret = -EFAULT; + goto unlock; + } + + ret = send_dbc_parameter(dbc_dev); + if (ret) + goto unlock; + + if (copy_to_user(argp, &dbc_dev->mbox->dbc_param.user, + sizeof(struct dbc_user_param))) { + ret = -EFAULT; + goto unlock; + } + break; default: ret = -EINVAL; diff --git a/drivers/crypto/ccp/dbc.h b/drivers/crypto/ccp/dbc.h index 156435100076..e963099ca38e 100644 --- a/drivers/crypto/ccp/dbc.h +++ b/drivers/crypto/ccp/dbc.h @@ -38,10 +38,16 @@ struct dbc_set_uid { struct dbc_user_setuid user; } __packed; +struct dbc_param { + struct psp_req_buffer_hdr header; + struct dbc_user_param user; +} __packed; + union dbc_buffer { struct psp_request req; struct dbc_nonce dbc_nonce; struct dbc_set_uid dbc_set_uid; + struct dbc_param dbc_param; }; void dbc_dev_destroy(struct psp_device *psp); diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h index 18b9e0f0cb03..c1dc87fc536b 100644 --- a/include/linux/psp-platform-access.h +++ b/include/linux/psp-platform-access.h @@ -10,6 +10,8 @@ enum psp_platform_access_msg { PSP_I2C_REQ_BUS_CMD = 0x64, PSP_DYNAMIC_BOOST_GET_NONCE, PSP_DYNAMIC_BOOST_SET_UID, + PSP_DYNAMIC_BOOST_GET_PARAMETER, + PSP_DYNAMIC_BOOST_SET_PARAMETER, }; struct psp_req_buffer_hdr { diff --git a/include/uapi/linux/psp-dbc.h b/include/uapi/linux/psp-dbc.h index 7443c78ede19..b3845a9ff5fd 100644 --- a/include/uapi/linux/psp-dbc.h +++ 
b/include/uapi/linux/psp-dbc.h @@ -45,6 +45,23 @@ struct dbc_user_setuid { __u8 signature[DBC_SIG_SIZE]; } __packed; +/** + * struct dbc_user_param - Parameter exchange structure (input/output). + * @msg_index: Message indicating what parameter to set or get (input) + * @param: 4 byte parameter, units are message specific. (input/output) + * @signature: 32 byte signature. + * - When sending a message this is to be created by software + * using a previous nonce (input) + * - For interpreting results, this signature is updated by the + * PSP to allow software to validate the authenticity of the + * results. + */ +struct dbc_user_param { + __u32 msg_index; + __u32 param; + __u8 signature[DBC_SIG_SIZE]; +} __packed; + /** * Dynamic Boost Control (DBC) IOC * @@ -84,4 +101,47 @@ struct dbc_user_setuid { */ #define DBCIOCUID _IOW(DBC_IOC_TYPE, 0x2, struct dbc_user_setuid) +/** + * DBCIOCPARAM - Set or get a parameter from the PSP. + * This request will only work after DBCIOCUID has successfully + * set the UID of the calling process. + * Whether the parameter is set or get is controlled by the + * message ID in the request. + * This command must be sent using a 32 byte signature built + * using the nonce fetched from DBCIOCNONCE. + * When the command succeeds, the 32 byte signature will be + * updated by the PSP for software to authenticate the results. + */ +#define DBCIOCPARAM _IOWR(DBC_IOC_TYPE, 0x3, struct dbc_user_param) + +/** + * enum dbc_cmd_msg - Messages utilized by DBCIOCPARAM + * @PARAM_GET_FMAX_CAP: Get frequency cap (MHz) + * @PARAM_SET_FMAX_CAP: Set frequency cap (MHz) + * @PARAM_GET_PWR_CAP: Get socket power cap (mW) + * @PARAM_SET_PWR_CAP: Set socket power cap (mW) + * @PARAM_GET_GFX_MODE: Get graphics mode (0/1) + * @PARAM_SET_GFX_MODE: Set graphics mode (0/1) + * @PARAM_GET_CURR_TEMP: Get current temperature (degrees C) + * @PARAM_GET_FMAX_MAX: Get maximum allowed value for frequency (MHz) + * @PARAM_GET_FMAX_MIN: Get minimum allowed value for frequency (MHz) + * @PARAM_GET_SOC_PWR_MAX: Get maximum allowed value for SoC power (mw) + * @PARAM_GET_SOC_PWR_MIN: Get minimum allowed value for SoC power (mw) + * @PARAM_GET_SOC_PWR_CUR: Get current value for SoC Power (mW) + */ +enum dbc_cmd_msg { + PARAM_GET_FMAX_CAP = 0x3, + PARAM_SET_FMAX_CAP = 0x4, + PARAM_GET_PWR_CAP = 0x5, + PARAM_SET_PWR_CAP = 0x6, + PARAM_GET_GFX_MODE = 0x7, + PARAM_SET_GFX_MODE = 0x8, + PARAM_GET_CURR_TEMP = 0x9, + PARAM_GET_FMAX_MAX = 0xA, + PARAM_GET_FMAX_MIN = 0xB, + PARAM_GET_SOC_PWR_MAX = 0xC, + PARAM_GET_SOC_PWR_MIN = 0xD, + PARAM_GET_SOC_PWR_CUR = 0xE, +}; + #endif /* __PSP_DBC_USER_H__ */ From febe3ed3222f92672d3e0471893aa8ab23275c28 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:58 -0500 Subject: [PATCH 017/149] crypto: ccp - Add a sample library for ioctl use Add a small shared library that demonstrates the usage of the IOCTL interface. 
This library can be linked to but, is intended to be loaded and used by higher level languages Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- tools/crypto/ccp/Makefile | 13 +++++++ tools/crypto/ccp/dbc.c | 72 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 tools/crypto/ccp/Makefile create mode 100644 tools/crypto/ccp/dbc.c diff --git a/tools/crypto/ccp/Makefile b/tools/crypto/ccp/Makefile new file mode 100644 index 000000000000..ae4a66d1558a --- /dev/null +++ b/tools/crypto/ccp/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +CFLAGS += -D__EXPORTED_HEADERS__ -I../../../include/uapi -I../../../include + +TARGET = dbc_library.so + +all: $(TARGET) + +dbc_library.so: dbc.c + $(CC) $(CFLAGS) $(LDFLAGS) -shared -o $@ $< + chmod -x $@ + +clean: + $(RM) $(TARGET) diff --git a/tools/crypto/ccp/dbc.c b/tools/crypto/ccp/dbc.c new file mode 100644 index 000000000000..37e813175642 --- /dev/null +++ b/tools/crypto/ccp/dbc.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Secure Processor Dynamic Boost Control sample library + * + * Copyright (C) 2023 Advanced Micro Devices, Inc. + * + * Author: Mario Limonciello + */ + +#include +#include +#include + +/* if uapi header isn't installed, this might not yet exist */ +#ifndef __packed +#define __packed __attribute__((packed)) +#endif +#include + +int get_nonce(int fd, void *nonce_out, void *signature) +{ + struct dbc_user_nonce tmp = { + .auth_needed = !!signature, + }; + int ret; + + assert(nonce_out); + + if (signature) + memcpy(tmp.signature, signature, sizeof(tmp.signature)); + + ret = ioctl(fd, DBCIOCNONCE, &tmp); + if (ret) + return ret; + memcpy(nonce_out, tmp.nonce, sizeof(tmp.nonce)); + + return 0; +} + +int set_uid(int fd, __u8 *uid, __u8 *signature) +{ + struct dbc_user_setuid tmp; + + assert(uid); + assert(signature); + + memcpy(tmp.uid, uid, sizeof(tmp.uid)); + memcpy(tmp.signature, signature, sizeof(tmp.signature)); + + return ioctl(fd, DBCIOCUID, &tmp); +} + +int process_param(int fd, int msg_index, __u8 *signature, int *data) +{ + struct dbc_user_param tmp = { + .msg_index = msg_index, + .param = *data, + }; + int ret; + + assert(signature); + assert(data); + + memcpy(tmp.signature, signature, sizeof(tmp.signature)); + + ret = ioctl(fd, DBCIOCPARAM, &tmp); + if (ret) + return ret; + + *data = tmp.param; + return 0; +} From f40d42f116cf965a9e37e2991f19ca1b5b156210 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:49:59 -0500 Subject: [PATCH 018/149] crypto: ccp - Add a sample python script for Dynamic Boost Control Dynamic Boost Control commands are triggered by userspace with an IOCTL interface that userspace will prepare proper buffers for a request. To allow prototyping and testing this interface, add a python3 command line script that loads the dbc_library.so for utilizing the IOCTLs. The signature to use and UID are passed as arguments to this script. 
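As a hypothetical usage illustration: once dbc_library.so has been built in the same directory, "./dbc_cli.py get-nonce" fetches an unauthenticated nonce, "./dbc_cli.py set-uid --uid uid.bin --signature sig.bin" registers an identity, and "./dbc_cli.py get-param --message get-fmax-cap --signature sig.bin" queries a parameter; uid.bin and sig.bin are only placeholder names for files holding the raw 16-byte UID and 32-byte signature expected by the ioctls.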
Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- tools/crypto/ccp/.gitignore | 1 + tools/crypto/ccp/dbc.py | 64 +++++++++++++++++ tools/crypto/ccp/dbc_cli.py | 134 ++++++++++++++++++++++++++++++++++++ 3 files changed, 199 insertions(+) create mode 100644 tools/crypto/ccp/.gitignore create mode 100644 tools/crypto/ccp/dbc.py create mode 100755 tools/crypto/ccp/dbc_cli.py diff --git a/tools/crypto/ccp/.gitignore b/tools/crypto/ccp/.gitignore new file mode 100644 index 000000000000..bee8a64b79a9 --- /dev/null +++ b/tools/crypto/ccp/.gitignore @@ -0,0 +1 @@ +__pycache__ diff --git a/tools/crypto/ccp/dbc.py b/tools/crypto/ccp/dbc.py new file mode 100644 index 000000000000..3f6a825ffc9e --- /dev/null +++ b/tools/crypto/ccp/dbc.py @@ -0,0 +1,64 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0 + +import ctypes +import os + +DBC_UID_SIZE = 16 +DBC_NONCE_SIZE = 16 +DBC_SIG_SIZE = 32 + +PARAM_GET_FMAX_CAP = (0x3,) +PARAM_SET_FMAX_CAP = (0x4,) +PARAM_GET_PWR_CAP = (0x5,) +PARAM_SET_PWR_CAP = (0x6,) +PARAM_GET_GFX_MODE = (0x7,) +PARAM_SET_GFX_MODE = (0x8,) +PARAM_GET_CURR_TEMP = (0x9,) +PARAM_GET_FMAX_MAX = (0xA,) +PARAM_GET_FMAX_MIN = (0xB,) +PARAM_GET_SOC_PWR_MAX = (0xC,) +PARAM_GET_SOC_PWR_MIN = (0xD,) +PARAM_GET_SOC_PWR_CUR = (0xE,) + +DEVICE_NODE = "/dev/dbc" + +lib = ctypes.CDLL("./dbc_library.so", mode=ctypes.RTLD_GLOBAL) + + +def handle_error(code): + val = code * -1 + raise OSError(val, os.strerror(val)) + + +def get_nonce(device, signature): + if not device: + raise ValueError("Device required") + buf = ctypes.create_string_buffer(DBC_NONCE_SIZE) + ret = lib.get_nonce(device.fileno(), ctypes.byref(buf), signature) + if ret: + handle_error(ret) + return buf.value + + +def set_uid(device, new_uid, signature): + if not signature: + raise ValueError("Signature required") + if not new_uid: + raise ValueError("UID required") + ret = lib.set_uid(device.fileno(), new_uid, signature) + if ret: + handle_error(ret) + return True + + +def process_param(device, message, signature, data=None): + if not signature: + raise ValueError("Signature required") + if type(message) != tuple: + raise ValueError("Expected message tuple") + arg = ctypes.c_int(data if data else 0) + ret = lib.process_param(device.fileno(), message[0], signature, ctypes.pointer(arg)) + if ret: + handle_error(ret) + return arg, signature diff --git a/tools/crypto/ccp/dbc_cli.py b/tools/crypto/ccp/dbc_cli.py new file mode 100755 index 000000000000..bf52233fd038 --- /dev/null +++ b/tools/crypto/ccp/dbc_cli.py @@ -0,0 +1,134 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0 +import argparse +import binascii +import os +import errno +from dbc import * + +ERRORS = { + errno.EACCES: "Access is denied", + errno.E2BIG: "Excess data provided", + errno.EINVAL: "Bad parameters", + errno.EAGAIN: "Bad state", + errno.ENOENT: "Not implemented or message failure", + errno.EBUSY: "Busy", + errno.ENFILE: "Overflow", + errno.EPERM: "Signature invalid", +} + +messages = { + "get-fmax-cap": PARAM_GET_FMAX_CAP, + "set-fmax-cap": PARAM_SET_FMAX_CAP, + "get-power-cap": PARAM_GET_PWR_CAP, + "set-power-cap": PARAM_SET_PWR_CAP, + "get-graphics-mode": PARAM_GET_GFX_MODE, + "set-graphics-mode": PARAM_SET_GFX_MODE, + "get-current-temp": PARAM_GET_CURR_TEMP, + "get-fmax-max": PARAM_GET_FMAX_MAX, + "get-fmax-min": PARAM_GET_FMAX_MIN, + "get-soc-power-max": PARAM_GET_SOC_PWR_MAX, + "get-soc-power-min": PARAM_GET_SOC_PWR_MIN, + "get-soc-power-cur": PARAM_GET_SOC_PWR_CUR, +} + + +def _pretty_buffer(ba): + return 
str(binascii.hexlify(ba, " ")) + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Dynamic Boost control command line interface" + ) + parser.add_argument( + "command", + choices=["get-nonce", "get-param", "set-param", "set-uid"], + help="Command to send", + ) + parser.add_argument("--device", default="/dev/dbc", help="Device to operate") + parser.add_argument("--signature", help="File containing signature for command") + parser.add_argument("--message", choices=messages.keys(), help="Message index") + parser.add_argument("--data", help="Argument to pass to message") + parser.add_argument("--uid", help="File containing UID to pass") + return parser.parse_args() + + +def pretty_error(code): + if code in ERRORS: + print(ERRORS[code]) + else: + print("failed with return code %d" % code) + + +if __name__ == "__main__": + args = parse_args() + data = 0 + sig = None + uid = None + if not os.path.exists(args.device): + raise IOError("Missing device {device}".format(device=args.device)) + if args.signature: + if not os.path.exists(args.signature): + raise ValueError("Invalid signature file %s" % args.signature) + with open(args.signature, "rb") as f: + sig = f.read() + if len(sig) != DBC_SIG_SIZE: + raise ValueError( + "Invalid signature length %d (expected %d)" % (len(sig), DBC_SIG_SIZE) + ) + if args.uid: + if not os.path.exists(args.uid): + raise ValueError("Invalid uid file %s" % args.uid) + with open(args.uid, "rb") as f: + uid = f.read() + if len(uid) != DBC_UID_SIZE: + raise ValueError( + "Invalid UID length %d (expected %d)" % (len(uid), DBC_UID_SIZE) + ) + if args.data: + try: + data = int(args.data, 10) + except ValueError: + data = int(args.data, 16) + + with open(args.device) as d: + if args.command == "get-nonce": + try: + nonce = get_nonce(d, sig) + print("Nonce: %s" % _pretty_buffer(bytes(nonce))) + except OSError as e: + pretty_error(e.errno) + elif args.command == "set-uid": + try: + result = set_uid(d, uid, sig) + if result: + print("Set UID") + except OSError as e: + pretty_error(e.errno) + elif args.command == "get-param": + if not args.message or args.message.startswith("set"): + raise ValueError("Invalid message %s" % args.message) + try: + param, signature = process_param(d, messages[args.message], sig) + print( + "Parameter: {par}, response signature {sig}".format( + par=param, + sig=_pretty_buffer(bytes(signature)), + ) + ) + except OSError as e: + pretty_error(e.errno) + elif args.command == "set-param": + if not args.message or args.message.startswith("get"): + raise ValueError("Invalid message %s" % args.message) + try: + param, signature = process_param(d, messages[args.message], sig, data) + print( + "Parameter: {par}, response signature {sig}".format( + par=param, + sig=_pretty_buffer(bytes(signature)), + ) + ) + except OSError as e: + pretty_error(e.errno) From 15f8aa7bb3e550278aa561e81b2e72ffa8e442b7 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:50:00 -0500 Subject: [PATCH 019/149] crypto: ccp - Add unit tests for dynamic boost control Interacting with dynamic boost control messages requires the caller to supply a signature. To allow validation of individual dynamic boost control components, introduce a set of tests that can be run. The tests can be run in 3 distinct different environments, and so certain tests will be skipped depending on the environment. 1. Systems that do not support DBC. 2. Production systems that support DBC but are secured silicon. 3. 
Pre-production systems that support DBC but are unsecured silicon. Unsecured silicon does not validate the signature, and so this allows testing more of the state machine and functionality. Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- tools/crypto/ccp/test_dbc.py | 266 +++++++++++++++++++++++++++++++++++ 1 file changed, 266 insertions(+) create mode 100755 tools/crypto/ccp/test_dbc.py diff --git a/tools/crypto/ccp/test_dbc.py b/tools/crypto/ccp/test_dbc.py new file mode 100755 index 000000000000..998bb3e3cd04 --- /dev/null +++ b/tools/crypto/ccp/test_dbc.py @@ -0,0 +1,266 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0 +import unittest +import os +import time +import glob +from dbc import * + +# Artificial delay between set commands +SET_DELAY = 0.5 + + +class invalid_param(ctypes.Structure): + _fields_ = [ + ("data", ctypes.c_uint8), + ] + + +def system_is_secured() -> bool: + fused_part = glob.glob("/sys/bus/pci/drivers/ccp/**/fused_part")[0] + if os.path.exists(fused_part): + with open(fused_part, "r") as r: + return int(r.read()) == 1 + return True + + +class DynamicBoostControlTest(unittest.TestCase): + def __init__(self, data) -> None: + self.d = None + self.signature = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" + self.uid = "1111111111111111" + super().__init__(data) + + def setUp(self) -> None: + self.d = open(DEVICE_NODE) + return super().setUp() + + def tearDown(self) -> None: + if self.d: + self.d.close() + return super().tearDown() + + +class TestUnsupportedSystem(DynamicBoostControlTest): + def setUp(self) -> None: + if os.path.exists(DEVICE_NODE): + self.skipTest("system is supported") + with self.assertRaises(FileNotFoundError) as error: + super().setUp() + self.assertEqual(error.exception.errno, 2) + + def test_unauthenticated_nonce(self) -> None: + """fetch unauthenticated nonce""" + with self.assertRaises(ValueError) as error: + get_nonce(self.d, None) + + +class TestInvalidIoctls(DynamicBoostControlTest): + def __init__(self, data) -> None: + self.data = invalid_param() + self.data.data = 1 + super().__init__(data) + + def setUp(self) -> None: + if not os.path.exists(DEVICE_NODE): + self.skipTest("system is unsupported") + return super().setUp() + + def test_invalid_nonce_ioctl(self) -> None: + """tries to call get_nonce ioctl with invalid data structures""" + + # 0x1 (get nonce), and invalid data + INVALID1 = IOWR(ord("D"), 0x01, invalid_param) + with self.assertRaises(OSError) as error: + fcntl.ioctl(self.d, INVALID1, self.data, True) + self.assertEqual(error.exception.errno, 22) + + def test_invalid_setuid_ioctl(self) -> None: + """tries to call set_uid ioctl with invalid data structures""" + + # 0x2 (set uid), and invalid data + INVALID2 = IOW(ord("D"), 0x02, invalid_param) + with self.assertRaises(OSError) as error: + fcntl.ioctl(self.d, INVALID2, self.data, True) + self.assertEqual(error.exception.errno, 22) + + def test_invalid_setuid_rw_ioctl(self) -> None: + """tries to call set_uid ioctl with invalid data structures""" + + # 0x2 as RW (set uid), and invalid data + INVALID3 = IOWR(ord("D"), 0x02, invalid_param) + with self.assertRaises(OSError) as error: + fcntl.ioctl(self.d, INVALID3, self.data, True) + self.assertEqual(error.exception.errno, 22) + + def test_invalid_param_ioctl(self) -> None: + """tries to call param ioctl with invalid data structures""" + # 0x3 (param), and invalid data + INVALID4 = IOWR(ord("D"), 0x03, invalid_param) + with self.assertRaises(OSError) as error: + fcntl.ioctl(self.d, INVALID4, 
self.data, True) + self.assertEqual(error.exception.errno, 22) + + def test_invalid_call_ioctl(self) -> None: + """tries to call the DBC ioctl with invalid data structures""" + # 0x4, and invalid data + INVALID5 = IOWR(ord("D"), 0x04, invalid_param) + with self.assertRaises(OSError) as error: + fcntl.ioctl(self.d, INVALID5, self.data, True) + self.assertEqual(error.exception.errno, 22) + + +class TestInvalidSignature(DynamicBoostControlTest): + def setUp(self) -> None: + if not os.path.exists(DEVICE_NODE): + self.skipTest("system is unsupported") + if not system_is_secured(): + self.skipTest("system is unfused") + return super().setUp() + + def test_unauthenticated_nonce(self) -> None: + """fetch unauthenticated nonce""" + get_nonce(self.d, None) + + def test_multiple_unauthenticated_nonce(self) -> None: + """ensure state machine always returns nonce""" + for count in range(0, 2): + get_nonce(self.d, None) + + def test_authenticated_nonce(self) -> None: + """fetch authenticated nonce""" + with self.assertRaises(OSError) as error: + get_nonce(self.d, self.signature) + self.assertEqual(error.exception.errno, 1) + + def test_set_uid(self) -> None: + """set uid""" + with self.assertRaises(OSError) as error: + set_uid(self.d, self.uid, self.signature) + self.assertEqual(error.exception.errno, 1) + + def test_get_param(self) -> None: + """fetch a parameter""" + with self.assertRaises(OSError) as error: + process_param(self.d, PARAM_GET_SOC_PWR_CUR, self.signature) + self.assertEqual(error.exception.errno, 1) + + def test_set_param(self) -> None: + """set a parameter""" + with self.assertRaises(OSError) as error: + process_param(self.d, PARAM_SET_PWR_CAP, self.signature, 1000) + self.assertEqual(error.exception.errno, 1) + + +class TestUnFusedSystem(DynamicBoostControlTest): + def setup_identity(self) -> None: + """sets up the identity of the caller""" + # if already authenticated these may fail + try: + get_nonce(self.d, None) + except PermissionError: + pass + try: + set_uid(self.d, self.uid, self.signature) + except BlockingIOError: + pass + try: + get_nonce(self.d, self.signature) + except PermissionError: + pass + + def setUp(self) -> None: + if not os.path.exists(DEVICE_NODE): + self.skipTest("system is unsupported") + if system_is_secured(): + self.skipTest("system is fused") + super().setUp() + self.setup_identity() + time.sleep(SET_DELAY) + + def test_get_valid_param(self) -> None: + """fetch all possible parameters""" + # SOC power + soc_power_max = process_param(self.d, PARAM_GET_SOC_PWR_MAX, self.signature) + soc_power_min = process_param(self.d, PARAM_GET_SOC_PWR_MIN, self.signature) + self.assertGreater(soc_power_max.parameter, soc_power_min.parameter) + + # fmax + fmax_max = process_param(self.d, PARAM_GET_FMAX_MAX, self.signature) + fmax_min = process_param(self.d, PARAM_GET_FMAX_MIN, self.signature) + self.assertGreater(fmax_max.parameter, fmax_min.parameter) + + # cap values + keys = { + "fmax-cap": PARAM_GET_FMAX_CAP, + "power-cap": PARAM_GET_PWR_CAP, + "current-temp": PARAM_GET_CURR_TEMP, + "soc-power-cur": PARAM_GET_SOC_PWR_CUR, + } + for k in keys: + result = process_param(self.d, keys[k], self.signature) + self.assertGreater(result.parameter, 0) + + def test_get_invalid_param(self) -> None: + """fetch an invalid parameter""" + try: + set_uid(self.d, self.uid, self.signature) + except OSError: + pass + with self.assertRaises(OSError) as error: + process_param(self.d, (0xF,), self.signature) + self.assertEqual(error.exception.errno, 22) + + def test_set_fmax(self) -> None: + 
"""get/set fmax limit""" + # fetch current + original = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature) + + # set the fmax + target = original.parameter - 100 + process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, target) + time.sleep(SET_DELAY) + new = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature) + self.assertEqual(new.parameter, target) + + # revert back to current + process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original.parameter) + time.sleep(SET_DELAY) + cur = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature) + self.assertEqual(cur.parameter, original.parameter) + + def test_set_power_cap(self) -> None: + """get/set power cap limit""" + # fetch current + original = process_param(self.d, PARAM_GET_PWR_CAP, self.signature) + + # set the fmax + target = original.parameter - 10 + process_param(self.d, PARAM_SET_PWR_CAP, self.signature, target) + time.sleep(SET_DELAY) + new = process_param(self.d, PARAM_GET_PWR_CAP, self.signature) + self.assertEqual(new.parameter, target) + + # revert back to current + process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original.parameter) + time.sleep(SET_DELAY) + cur = process_param(self.d, PARAM_GET_PWR_CAP, self.signature) + self.assertEqual(cur.parameter, original.parameter) + + def test_set_3d_graphics_mode(self) -> None: + """set/get 3d graphics mode""" + # these aren't currently implemented but may be some day + # they are *expected* to fail + with self.assertRaises(OSError) as error: + process_param(self.d, PARAM_GET_GFX_MODE, self.signature) + self.assertEqual(error.exception.errno, 2) + + time.sleep(SET_DELAY) + + with self.assertRaises(OSError) as error: + process_param(self.d, PARAM_SET_GFX_MODE, self.signature, 1) + self.assertEqual(error.exception.errno, 2) + + +if __name__ == "__main__": + unittest.main() From 4b97d282235dd4d9bff1b6aa327350101bc79471 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 23 Jun 2023 08:50:01 -0500 Subject: [PATCH 020/149] crypto: ccp - Add Mario to MAINTAINERS I will maintain the platform access interface and dynamic boost control support. Acked-by: Tom Lendacky Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu --- MAINTAINERS | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 3be1bdfe8ecc..64602d4169d0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -915,6 +915,18 @@ S: Supported F: drivers/crypto/ccp/sev* F: include/uapi/linux/psp-sev.h +AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - DBC SUPPORT +M: Mario Limonciello +L: linux-crypto@vger.kernel.org +S: Supported +F: drivers/crypto/ccp/dbc.c +F: drivers/crypto/ccp/dbc.h +F: drivers/crypto/ccp/platform-access.c +F: drivers/crypto/ccp/platform-access.h +F: include/uapi/linux/psp-dbc.h +F: tools/crypto/ccp/*.c +F: tools/crypto/ccp/*.py + AMD DISPLAY CORE M: Harry Wentland M: Leo Li From 20508b751b4bef2cda999fe0268071b0fc335ac7 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 30 Jun 2023 15:54:24 +0800 Subject: [PATCH 021/149] crypto: sig - Remove some unused functions These functions are defined in the sig.c file, but not called elsewhere, so delete these unused functions. crypto/sig.c:24:34: warning: unused function '__crypto_sig_tfm'. 
Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5701 Signed-off-by: Jiapeng Chong Signed-off-by: Herbert Xu --- crypto/sig.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/crypto/sig.c b/crypto/sig.c index b48c18ec65cd..224c47019297 100644 --- a/crypto/sig.c +++ b/crypto/sig.c @@ -21,11 +21,6 @@ static const struct crypto_type crypto_sig_type; -static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm) -{ - return container_of(tfm, struct crypto_sig, base); -} - static int crypto_sig_init_tfm(struct crypto_tfm *tfm) { if (tfm->__crt_alg->cra_type != &crypto_sig_type) From 865b50fe6ea85b41354967df03c317eb1627cc13 Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 30 Jun 2023 17:32:07 +0200 Subject: [PATCH 022/149] crypto: qat - add fw_counters debugfs file Expose FW counters statistics by providing the "fw_counters" file under debugfs. Currently the statistics include the number of requests sent to the FW and the number of responses received from the FW for each Acceleration Engine, for all the QAT product line. This patch is based on earlier work done by Marco Chiappero. Co-developed-by: Adam Guerin Signed-off-by: Adam Guerin Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-driver-qat | 10 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/intel/qat/qat_common/adf_admin.c | 18 ++ .../intel/qat/qat_common/adf_common_drv.h | 1 + .../crypto/intel/qat/qat_common/adf_dbgfs.c | 7 + .../intel/qat/qat_common/adf_fw_counters.c | 264 ++++++++++++++++++ .../intel/qat/qat_common/adf_fw_counters.h | 11 + 8 files changed, 313 insertions(+) create mode 100644 Documentation/ABI/testing/debugfs-driver-qat create mode 100644 drivers/crypto/intel/qat/qat_common/adf_fw_counters.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_fw_counters.h diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat new file mode 100644 index 000000000000..22d39c0ca1b2 --- /dev/null +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -0,0 +1,10 @@ +What: /sys/kernel/debug/qat__/qat/fw_counters +Date: November 2023 +KernelVersion: 6.6 +Contact: qat-linux@intel.com +Description: (RO) Read returns the number of requests sent to the FW and the number of responses + received from the FW for each Acceleration Engine + Reported firmware counters:: + + : Number of requests sent from Acceleration Engine N to FW and responses + Acceleration Engine N received from FW diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 38de3aba6e8c..f541046cdf9a 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -28,6 +28,7 @@ intel_qat-objs := adf_cfg.o \ qat_bl.o intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ + adf_fw_counters.o \ adf_dbgfs.o intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 0399417b91fc..a54a994b5e27 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -292,6 +292,7 @@ struct adf_accel_dev { unsigned long status; atomic_t ref_count; struct dentry *debugfs_dir; + struct dentry 
*fw_cntr_dbgfile; struct list_head list; struct module *owner; struct adf_accel_pci accel_pci_dev; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 118775ee02f2..4716b82d48ec 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -223,6 +223,24 @@ static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev, return 0; } +int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps) +{ + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_COUNTERS_GET; + + ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp); + if (ret || resp.status) + return -EFAULT; + + *reqs = resp.req_rec_count; + *resps = resp.resp_sent_count; + + return 0; +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index b8132eb9bc2a..4682be073c99 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -94,6 +94,7 @@ void adf_exit_aer(void); int adf_init_admin_comms(struct adf_accel_dev *accel_dev); void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); int adf_send_admin_init(struct adf_accel_dev *accel_dev); +int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index d0a2f892e6eb..5080ecffab03 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -6,6 +6,7 @@ #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_dbgfs.h" +#include "adf_fw_counters.h" /** * adf_dbgfs_init() - add persistent debugfs entries @@ -56,6 +57,9 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) { if (!accel_dev->debugfs_dir) return; + + if (!accel_dev->is_vf) + adf_fw_counters_dbgfs_add(accel_dev); } /** @@ -66,4 +70,7 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) { if (!accel_dev->debugfs_dir) return; + + if (!accel_dev->is_vf) + adf_fw_counters_dbgfs_rm(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c new file mode 100644 index 000000000000..cb6e09ef5c9f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_fw_counters.h" + +#define ADF_FW_COUNTERS_MAX_PADDING 16 + +enum adf_fw_counters_types { + ADF_FW_REQUESTS, + ADF_FW_RESPONSES, + ADF_FW_COUNTERS_COUNT +}; + +static const char * const adf_fw_counter_names[] = { + [ADF_FW_REQUESTS] = "Requests", + [ADF_FW_RESPONSES] = "Responses", +}; + +static_assert(ARRAY_SIZE(adf_fw_counter_names) == ADF_FW_COUNTERS_COUNT); + +struct adf_ae_counters { + u16 ae; + u64 values[ADF_FW_COUNTERS_COUNT]; +}; + +struct adf_fw_counters { + u16 ae_count; + 
struct adf_ae_counters ae_counters[]; +}; + +static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae, + u64 req_count, u64 resp_count) +{ + ae_counters->ae = ae; + ae_counters->values[ADF_FW_REQUESTS] = req_count; + ae_counters->values[ADF_FW_RESPONSES] = resp_count; +} + +static int adf_fw_counters_load_from_device(struct adf_accel_dev *accel_dev, + struct adf_fw_counters *fw_counters) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + unsigned long ae_mask; + unsigned int i; + unsigned long ae; + + /* Ignore the admin AEs */ + ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask; + + if (hweight_long(ae_mask) > fw_counters->ae_count) + return -EINVAL; + + i = 0; + for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { + u64 req_count, resp_count; + int ret; + + ret = adf_get_ae_fw_counters(accel_dev, ae, &req_count, &resp_count); + if (ret) + return ret; + + adf_fw_counters_parse_ae_values(&fw_counters->ae_counters[i++], ae, + req_count, resp_count); + } + + return 0; +} + +static struct adf_fw_counters *adf_fw_counters_allocate(unsigned long ae_count) +{ + struct adf_fw_counters *fw_counters; + + if (unlikely(!ae_count)) + return ERR_PTR(-EINVAL); + + fw_counters = kmalloc(struct_size(fw_counters, ae_counters, ae_count), GFP_KERNEL); + if (!fw_counters) + return ERR_PTR(-ENOMEM); + + fw_counters->ae_count = ae_count; + + return fw_counters; +} + +/** + * adf_fw_counters_get() - Return FW counters for the provided device. + * @accel_dev: Pointer to a QAT acceleration device + * + * Allocates and returns a table of counters containing execution statistics + * for each non-admin AE available through the supplied acceleration device. + * The caller becomes the owner of such memory and is responsible for + * the deallocation through a call to kfree(). + * + * Returns: a pointer to a dynamically allocated struct adf_fw_counters + * on success, or a negative value on error. 
+ */ +static struct adf_fw_counters *adf_fw_counters_get(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_fw_counters *fw_counters; + unsigned long ae_count; + int ret; + + if (!adf_dev_started(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "QAT Device not started\n"); + return ERR_PTR(-EFAULT); + } + + /* Ignore the admin AEs */ + ae_count = hweight_long(hw_data->ae_mask & ~hw_data->admin_ae_mask); + + fw_counters = adf_fw_counters_allocate(ae_count); + if (IS_ERR(fw_counters)) + return fw_counters; + + ret = adf_fw_counters_load_from_device(accel_dev, fw_counters); + if (ret) { + kfree(fw_counters); + dev_err(&GET_DEV(accel_dev), + "Failed to create QAT fw_counters file table [%d].\n", ret); + return ERR_PTR(ret); + } + + return fw_counters; +} + +static void *qat_fw_counters_seq_start(struct seq_file *sfile, loff_t *pos) +{ + struct adf_fw_counters *fw_counters = sfile->private; + + if (*pos == 0) + return SEQ_START_TOKEN; + + if (*pos > fw_counters->ae_count) + return NULL; + + return &fw_counters->ae_counters[*pos - 1]; +} + +static void *qat_fw_counters_seq_next(struct seq_file *sfile, void *v, loff_t *pos) +{ + struct adf_fw_counters *fw_counters = sfile->private; + + (*pos)++; + + if (*pos > fw_counters->ae_count) + return NULL; + + return &fw_counters->ae_counters[*pos - 1]; +} + +static void qat_fw_counters_seq_stop(struct seq_file *sfile, void *v) {} + +static int qat_fw_counters_seq_show(struct seq_file *sfile, void *v) +{ + int i; + + if (v == SEQ_START_TOKEN) { + seq_puts(sfile, "AE "); + for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i) + seq_printf(sfile, " %*s", ADF_FW_COUNTERS_MAX_PADDING, + adf_fw_counter_names[i]); + } else { + struct adf_ae_counters *ae_counters = (struct adf_ae_counters *)v; + + seq_printf(sfile, "%2d:", ae_counters->ae); + for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i) + seq_printf(sfile, " %*llu", ADF_FW_COUNTERS_MAX_PADDING, + ae_counters->values[i]); + } + seq_putc(sfile, '\n'); + + return 0; +} + +static const struct seq_operations qat_fw_counters_sops = { + .start = qat_fw_counters_seq_start, + .next = qat_fw_counters_seq_next, + .stop = qat_fw_counters_seq_stop, + .show = qat_fw_counters_seq_show, +}; + +static int qat_fw_counters_file_open(struct inode *inode, struct file *file) +{ + struct adf_accel_dev *accel_dev = inode->i_private; + struct seq_file *fw_counters_seq_file; + struct adf_fw_counters *fw_counters; + int ret; + + fw_counters = adf_fw_counters_get(accel_dev); + if (IS_ERR(fw_counters)) + return PTR_ERR(fw_counters); + + ret = seq_open(file, &qat_fw_counters_sops); + if (unlikely(ret)) { + kfree(fw_counters); + return ret; + } + + fw_counters_seq_file = file->private_data; + fw_counters_seq_file->private = fw_counters; + return ret; +} + +static int qat_fw_counters_file_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + + kfree(seq->private); + seq->private = NULL; + + return seq_release(inode, file); } + +static const struct file_operations qat_fw_counters_fops = { + .owner = THIS_MODULE, + .open = qat_fw_counters_file_open, + .read = seq_read, + .llseek = seq_lseek, + .release = qat_fw_counters_file_release, +}; + +/** + * adf_fw_counters_dbgfs_add() - Create a debugfs file containing FW + * execution counters. + * @accel_dev: Pointer to a QAT acceleration device + * + * Function creates a file to display a table with statistics for the given + * QAT acceleration device. 
The table stores device specific execution values + * for each AE, such as the number of requests sent to the FW and responses + * received from the FW. + * + * Return: void + */ +void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + accel_dev->fw_cntr_dbgfile = debugfs_create_file("fw_counters", 0400, + accel_dev->debugfs_dir, + accel_dev, + &qat_fw_counters_fops); +} + +/** + * adf_fw_counters_dbgfs_rm() - Remove the debugfs file containing FW counters. + * @accel_dev: Pointer to a QAT acceleration device. + * + * Function removes the file providing the table of statistics for the given + * QAT acceleration device. + * + * Return: void + */ +void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + debugfs_remove(accel_dev->fw_cntr_dbgfile); + accel_dev->fw_cntr_dbgfile = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h new file mode 100644 index 000000000000..91b3b6a95f1f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_FW_COUNTERS_H +#define ADF_FW_COUNTERS_H + +struct adf_accel_dev; + +void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev); +void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif From fd77d8da1fa85a7549ce3cd50222f9f795cc5f5d Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 30 Jun 2023 19:03:54 +0200 Subject: [PATCH 023/149] crypto: qat - add internal timer for qat 4xxx The power management feature in QAT 4xxx devices can disable clock sources used to implement timers. Because of that, the firmware needs to get an external reliable source of time. Add a kernel delayed work that periodically sends an event to the firmware. This is triggered every 200ms. At each execution, the driver sends a sync request to the firmware reporting the current timestamp counter value. This is a pre-requisite for enabling the heartbeat, telemetry and rate limiting features. 
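As a concrete illustration of the value reported by this mechanism (numbers are illustrative, not taken from hardware): with the fixed 200 ms period, a device that has been up for one minute has ktime_ms_delta(now, initial_ktime) of roughly 60,000 ms, so the work handler sends 60,000 / 200 = 300 in the int_timer_ticks field of the sync request.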
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 3 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 3 + .../crypto/intel/qat/qat_common/adf_admin.c | 12 ++++ .../intel/qat/qat_common/adf_common_drv.h | 3 + .../intel/qat/qat_common/adf_gen4_timer.c | 70 +++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_timer.h | 21 ++++++ .../crypto/intel/qat/qat_common/adf_init.c | 13 ++++ drivers/crypto/intel/qat/qat_common/adf_isr.c | 6 ++ .../qat/qat_common/icp_qat_fw_init_admin.h | 5 ++ 10 files changed, 137 insertions(+) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index e543a9e24a06..831d460bc503 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -508,6 +509,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; + hw_data->start_timer = adf_gen4_timer_start; + hw_data->stop_timer = adf_gen4_timer_stop; adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index f541046cdf9a..6a82ef8df733 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -17,6 +17,7 @@ intel_qat-objs := adf_cfg.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ + adf_gen4_timer.o \ qat_crypto.o \ qat_compression.o \ qat_comp_algs.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index a54a994b5e27..2198b410b029 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -188,6 +188,8 @@ struct adf_hw_device_data { int (*init_admin_comms)(struct adf_accel_dev *accel_dev); void (*exit_admin_comms)(struct adf_accel_dev *accel_dev); int (*send_admin_init)(struct adf_accel_dev *accel_dev); + int (*start_timer)(struct adf_accel_dev *accel_dev); + void (*stop_timer)(struct adf_accel_dev *accel_dev); int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev); @@ -296,6 +298,7 @@ struct adf_accel_dev { struct list_head list; struct module *owner; struct adf_accel_pci accel_pci_dev; + struct adf_timer *timer; union { struct { /* protects VF2PF interrupts access */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 4716b82d48ec..d6cb7f50bb5a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -241,6 +241,18 @@ int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u return 0; } +int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt) +{ + u32 ae_mask = accel_dev->hw_device->ae_mask; + struct 
icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp = { }; + + req.cmd_id = ICP_QAT_FW_SYNC; + req.int_timer_ticks = cnt; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 4682be073c99..e0d472071354 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -96,6 +96,7 @@ void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); int adf_send_admin_init(struct adf_accel_dev *accel_dev); int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); +int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); @@ -194,6 +195,8 @@ int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, int adf_init_misc_wq(void); void adf_exit_misc_wq(void); bool adf_misc_wq_queue_work(struct work_struct *work); +bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, + unsigned long delay); #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c new file mode 100644 index 000000000000..646c57922fcd --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_gen4_timer.h" + +#define ADF_GEN4_TIMER_PERIOD_MS 200 + +/* This periodic update is used to trigger HB, RL & TL fw events */ +static void work_handler(struct work_struct *work) +{ + struct adf_accel_dev *accel_dev; + struct adf_timer *timer_ctx; + u32 time_periods; + + timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx); + accel_dev = timer_ctx->accel_dev; + + adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx, + msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS)); + + time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime), + ADF_GEN4_TIMER_PERIOD_MS); + + if (adf_send_admin_tim_sync(accel_dev, time_periods)) + dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n"); +} + +int adf_gen4_timer_start(struct adf_accel_dev *accel_dev) +{ + struct adf_timer *timer_ctx; + + timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL); + if (!timer_ctx) + return -ENOMEM; + + timer_ctx->accel_dev = accel_dev; + accel_dev->timer = timer_ctx; + timer_ctx->initial_ktime = ktime_get_real(); + + INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler); + adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx, + msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS)); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_timer_start); + +void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev) +{ + struct adf_timer *timer_ctx = accel_dev->timer; + + if (!timer_ctx) + return; + + cancel_delayed_work_sync(&timer_ctx->work_ctx); + + kfree(timer_ctx); + 
accel_dev->timer = NULL; +} +EXPORT_SYMBOL_GPL(adf_gen4_timer_stop); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h new file mode 100644 index 000000000000..66a709e7b358 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_GEN4_TIMER_H_ +#define ADF_GEN4_TIMER_H_ + +#include +#include + +struct adf_accel_dev; + +struct adf_timer { + struct adf_accel_dev *accel_dev; + struct delayed_work work_ctx; + ktime_t initial_ktime; +}; + +int adf_gen4_timer_start(struct adf_accel_dev *accel_dev); +void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev); + +#endif /* ADF_GEN4_TIMER_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 826179c98524..0acba0f988da 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -163,6 +163,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; + int ret; set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -187,6 +188,14 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) return -EFAULT; } + if (hw_data->start_timer) { + ret = hw_data->start_timer(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n"); + return ret; + } + } + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_START)) { @@ -235,6 +244,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) */ static void adf_dev_stop(struct adf_accel_dev *accel_dev) { + struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; struct list_head *list_itr; bool wait = false; @@ -270,6 +280,9 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) } } + if (hw_data->stop_timer) + hw_data->stop_timer(accel_dev); + if (wait) msleep(100); diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index ad9e135b8560..2aba194a7c29 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -380,3 +380,9 @@ bool adf_misc_wq_queue_work(struct work_struct *work) { return queue_work(adf_misc_wq, work); } + +bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, + unsigned long delay) +{ + return queue_delayed_work(adf_misc_wq, work, delay); +} diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 56cb827f93ea..d853c9242acf 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -37,6 +37,9 @@ struct icp_qat_fw_init_admin_req { __u16 ibuf_size_in_kb; __u16 resrvd3; }; + struct { + __u32 int_timer_ticks; + }; __u32 idle_filter; }; @@ -97,6 +100,8 @@ struct icp_qat_fw_init_admin_resp { }; } __packed; +#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC + #define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 #define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1 #define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0 From 7f77b6797b34862e6164335c894f587387bff082 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 30 Jun 2023 19:03:55 
+0200 Subject: [PATCH 024/149] crypto: qat - drop obsolete heartbeat interface Drop legacy heartbeat interface from FW API as it is no longer used. Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- .../intel/qat/qat_common/icp_qat_fw_init_admin.h | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index d853c9242acf..6ea19b4fb0ce 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -102,19 +102,4 @@ struct icp_qat_fw_init_admin_resp { #define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC -#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0 -#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1 -#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0 -#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1 -#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE -#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \ - ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags) - -#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \ - ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val) - -#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, \ - ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \ - ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK) #endif From e2980ba57e797e58a5476fbc4296f40551fb3404 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 30 Jun 2023 19:03:56 +0200 Subject: [PATCH 025/149] crypto: qat - add measure clock frequency The QAT hardware does not expose a mechanism to report its clock frequency. This is required to implement the Heartbeat feature. Add a clock measuring algorithm that estimates the frequency by comparing the internal timestamp counter incremented by the firmware with the time measured by the kernel. The frequency value is only used internally and not exposed to the user. 
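The arithmetic behind the estimate can be modelled outside the driver. The Python sketch below is illustrative only: the 16-cycle divider matches the ME_CLK_DIVIDER value used by the measurement code, while the sample numbers are made up.

def estimate_ae_clock_hz(ts1, t1_us, ts2, t2_us, me_clk_divider=16):
    """Model of the estimate: the firmware timestamp counter ticks once
    every me_clk_divider AE clock cycles, so the AE frequency is
    delta_ticks * divider / delta_seconds."""
    delta_ticks = ts2 - ts1
    delta_us = t2_us - t1_us
    return round(delta_ticks * me_clk_divider * 1_000_000 / delta_us)

# Illustrative numbers: 3,000,000 counter ticks over ~100,000 us -> 480 MHz,
# which the driver would then clamp to the device's allowed [min, max] range.
print(estimate_ae_clock_hz(0, 0, 3_000_000, 100_000))  # 480000000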
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 10 ++ .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 4 + .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 25 ++++ .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h | 7 + .../intel/qat/qat_c62x/adf_c62x_hw_data.c | 25 ++++ .../intel/qat/qat_c62x/adf_c62x_hw_data.h | 7 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 2 + .../crypto/intel/qat/qat_common/adf_admin.c | 17 +++ .../crypto/intel/qat/qat_common/adf_clock.c | 131 ++++++++++++++++++ .../crypto/intel/qat/qat_common/adf_clock.h | 14 ++ .../intel/qat/qat_common/adf_common_drv.h | 1 + .../crypto/intel/qat/qat_common/adf_init.c | 8 ++ .../qat/qat_common/icp_qat_fw_init_admin.h | 1 + .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 10 ++ .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 5 + 16 files changed, 268 insertions(+) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_clock.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_clock.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 831d460bc503..9cda0c17992a 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -318,6 +319,14 @@ static void get_admin_info(struct admin_info *admin_csrs_info) admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; } +static u32 get_heartbeat_clock(struct adf_hw_device_data *self) +{ + /* + * 4XXX uses KPT counter for HB + */ + return ADF_4XXX_KPT_COUNTER_FREQ; +} + static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; @@ -511,6 +520,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->dev_config = adf_gen4_dev_config; hw_data->start_timer = adf_gen4_timer_start; hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->get_hb_clock = get_heartbeat_clock; adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index e5b314d2b60e..bb3d95a8fb21 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -3,6 +3,7 @@ #ifndef ADF_4XXX_HW_DATA_H_ #define ADF_4XXX_HW_DATA_H_ +#include #include /* PCIe configuration space */ @@ -64,6 +65,9 @@ #define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin" #define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin" +/* Clocks frequency */ +#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) + /* qat_4xxx fuse bits are different from old GENs, redefine them */ enum icp_qat_4xxx_slice_mask { ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0), diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 475643654e64..2c6a1dd9780c 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include @@ -50,6 +51,28 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) 
return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK; } +static u32 get_ts_clock(struct adf_hw_device_data *self) +{ + /* + * Timestamp update interval is 16 AE clock ticks for c3xxx. + */ + return self->clock_frequency / 16; +} + +static int measure_clock(struct adf_accel_dev *accel_dev) +{ + u32 frequency; + int ret; + + ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C3XXX_MIN_AE_FREQ, + ADF_C3XXX_MAX_AE_FREQ); + if (ret) + return ret; + + accel_dev->hw_device->clock_frequency = frequency; + return 0; +} + static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_C3XXX_PMISC_BAR; @@ -127,6 +150,8 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; hw_data->disable_iov = adf_disable_sriov; hw_data->dev_config = adf_gen2_dev_config; + hw_data->measure_clock = measure_clock; + hw_data->get_hb_clock = get_ts_clock; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h index 336a06f11dbd..690c6a1aa172 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -3,6 +3,8 @@ #ifndef ADF_C3XXX_HW_DATA_H_ #define ADF_C3XXX_HW_DATA_H_ +#include + /* PCIe configuration space */ #define ADF_C3XXX_PMISC_BAR 0 #define ADF_C3XXX_ETR_BAR 1 @@ -19,6 +21,11 @@ #define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48 #define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6 +/* Clocks frequency */ +#define ADF_C3XXX_AE_FREQ (685 * HZ_PER_MHZ) +#define ADF_C3XXX_MIN_AE_FREQ (533 * HZ_PER_MHZ) +#define ADF_C3XXX_MAX_AE_FREQ (685 * HZ_PER_MHZ) + /* Firmware Binary */ #define ADF_C3XXX_FW "qat_c3xxx.bin" #define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin" diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index e14270703670..081702be839e 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include @@ -50,6 +51,28 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK; } +static u32 get_ts_clock(struct adf_hw_device_data *self) +{ + /* + * Timestamp update interval is 16 AE clock ticks for c62x. 
+ */ + return self->clock_frequency / 16; +} + +static int measure_clock(struct adf_accel_dev *accel_dev) +{ + u32 frequency; + int ret; + + ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C62X_MIN_AE_FREQ, + ADF_C62X_MAX_AE_FREQ); + if (ret) + return ret; + + accel_dev->hw_device->clock_frequency = frequency; + return 0; +} + static u32 get_misc_bar_id(struct adf_hw_device_data *self) { return ADF_C62X_PMISC_BAR; @@ -129,6 +152,8 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; hw_data->disable_iov = adf_disable_sriov; hw_data->dev_config = adf_gen2_dev_config; + hw_data->measure_clock = measure_clock; + hw_data->get_hb_clock = get_ts_clock; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h index 008c0a3a9769..13e6ebf6fd91 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h @@ -3,6 +3,8 @@ #ifndef ADF_C62X_HW_DATA_H_ #define ADF_C62X_HW_DATA_H_ +#include + /* PCIe configuration space */ #define ADF_C62X_SRAM_BAR 0 #define ADF_C62X_PMISC_BAR 1 @@ -19,6 +21,11 @@ #define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80 #define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10 +/* Clocks frequency */ +#define ADF_C62X_AE_FREQ (685 * HZ_PER_MHZ) +#define ADF_C62X_MIN_AE_FREQ (533 * HZ_PER_MHZ) +#define ADF_C62X_MAX_AE_FREQ (800 * HZ_PER_MHZ) + /* Firmware Binary */ #define ADF_C62X_FW "qat_c62x.bin" #define ADF_C62X_MMP "qat_c62x_mmp.bin" diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 6a82ef8df733..b3e32d2abf45 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -18,6 +18,7 @@ intel_qat-objs := adf_cfg.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ adf_gen4_timer.o \ + adf_clock.o \ qat_crypto.o \ qat_compression.o \ qat_comp_algs.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 2198b410b029..c96f59cb7de9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -190,6 +190,8 @@ struct adf_hw_device_data { int (*send_admin_init)(struct adf_accel_dev *accel_dev); int (*start_timer)(struct adf_accel_dev *accel_dev); void (*stop_timer)(struct adf_accel_dev *accel_dev); + uint32_t (*get_hb_clock)(struct adf_hw_device_data *self); + int (*measure_clock)(struct adf_accel_dev *accel_dev); int (*init_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev); const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index d6cb7f50bb5a..966e7ea271a2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -15,6 +15,7 @@ #define ADF_CONST_TABLE_SIZE 1024 #define ADF_ADMIN_POLL_DELAY_US 20 #define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC) +#define ADF_ONE_AE 1 static const u8 const_tab[1024] __aligned(1024) = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -194,6 +195,22 @@ static int adf_set_fw_constants(struct adf_accel_dev *accel_dev) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +int 
adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp) +{ + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + unsigned int ae_mask = ADF_ONE_AE; + int ret; + + req.cmd_id = ICP_QAT_FW_TIMER_GET; + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + *timestamp = resp.timestamp; + return 0; +} + static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev, u32 *capabilities) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c new file mode 100644 index 000000000000..dc0778691eb0 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_clock.h" +#include "adf_common_drv.h" + +#define MEASURE_CLOCK_RETRIES 10 +#define MEASURE_CLOCK_DELAY_US 10000 +#define ME_CLK_DIVIDER 16 +#define MEASURE_CLOCK_DELTA_THRESHOLD_US 100 + +static inline u64 timespec_to_us(const struct timespec64 *ts) +{ + return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_USEC); +} + +static inline u64 timespec_to_ms(const struct timespec64 *ts) +{ + return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_MSEC); +} + +u64 adf_clock_get_current_time(void) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + return timespec_to_ms(&ts); +} + +static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency) +{ + struct timespec64 ts1, ts2, ts3, ts4; + u64 timestamp1, timestamp2, temp; + u32 delta_us, tries; + int ret; + + tries = MEASURE_CLOCK_RETRIES; + do { + ktime_get_real_ts64(&ts1); + ret = adf_get_fw_timestamp(accel_dev, ×tamp1); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to get fw timestamp\n"); + return ret; + } + ktime_get_real_ts64(&ts2); + delta_us = timespec_to_us(&ts2) - timespec_to_us(&ts1); + } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries); + + if (!tries) { + dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n"); + return -ETIMEDOUT; + } + + fsleep(MEASURE_CLOCK_DELAY_US); + + tries = MEASURE_CLOCK_RETRIES; + do { + ktime_get_real_ts64(&ts3); + if (adf_get_fw_timestamp(accel_dev, ×tamp2)) { + dev_err(&GET_DEV(accel_dev), + "Failed to get fw timestamp\n"); + return -EIO; + } + ktime_get_real_ts64(&ts4); + delta_us = timespec_to_us(&ts4) - timespec_to_us(&ts3); + } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries); + + if (!tries) { + dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n"); + return -ETIMEDOUT; + } + + delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1); + temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10; + temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us); + /* + * Enclose the division to allow the preprocessor to precalculate it, + * and avoid promoting r-value to 64-bit before division. + */ + *frequency = temp * (HZ_PER_MHZ / 10); + + return 0; +} + +/** + * adf_dev_measure_clock() - measures device clock frequency + * @accel_dev: Pointer to acceleration device. + * @frequency: Pointer to variable where result will be stored + * @min: Minimal allowed frequency value + * @max: Maximal allowed frequency value + * + * If the measurement result will go beyond the min/max thresholds the value + * will take the value of the crossed threshold. 
+ * + * This algorithm compares the device firmware timestamp with the kernel + * timestamp. So we can't expect too high accuracy from this measurement. + * + * Return: + * * 0 - measurement succeed + * * -ETIMEDOUT - measurement failed + */ +int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, + u32 *frequency, u32 min, u32 max) +{ + int ret; + u32 freq; + + ret = measure_clock(accel_dev, &freq); + if (ret) + return ret; + + *frequency = clamp(freq, min, max); + + if (*frequency != freq) + dev_warn(&GET_DEV(accel_dev), + "Measured clock %d Hz is out of range, assuming %d\n", + freq, *frequency); + return 0; +} +EXPORT_SYMBOL_GPL(adf_dev_measure_clock); diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.h b/drivers/crypto/intel/qat/qat_common/adf_clock.h new file mode 100644 index 000000000000..e309bc0dc35c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_clock.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_CLOCK_H +#define ADF_CLOCK_H + +#include + +struct adf_accel_dev; + +int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency, + u32 min, u32 max); +u64 adf_clock_get_current_time(void); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index e0d472071354..a9552832858f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -97,6 +97,7 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev); int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); +int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 0acba0f988da..53fca6a7e2af 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -178,6 +178,14 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) return -EFAULT; } + if (hw_data->measure_clock) { + ret = hw_data->measure_clock(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n"); + return ret; + } + } + /* Set ssm watch dog timer */ if (hw_data->set_ssm_wdtimer) hw_data->set_ssm_wdtimer(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 6ea19b4fb0ce..e836fbaeec62 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, + ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_PM_STATE_CONFIG = 128, }; diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 1ebe0b351fae..c57403a6fbac 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -44,6 +44,14 @@ static u32 
get_misc_bar_id(struct adf_hw_device_data *self) return ADF_DH895XCC_PMISC_BAR; } +static u32 get_ts_clock(struct adf_hw_device_data *self) +{ + /* + * Timestamp update interval is 16 AE clock ticks for dh895xcc. + */ + return self->clock_frequency / 16; +} + static u32 get_etr_bar_id(struct adf_hw_device_data *self) { return ADF_DH895XCC_ETR_BAR; @@ -237,6 +245,8 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) hw_data->reset_device = adf_reset_sbr; hw_data->disable_iov = adf_disable_sriov; hw_data->dev_config = adf_gen2_dev_config; + hw_data->clock_frequency = ADF_DH895X_AE_FREQ; + hw_data->get_hb_clock = get_ts_clock; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts; diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 7b674bbe4192..cd3a21985455 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -3,6 +3,8 @@ #ifndef ADF_DH895x_HW_DATA_H_ #define ADF_DH895x_HW_DATA_H_ +#include + /* PCIe configuration space */ #define ADF_DH895XCC_SRAM_BAR 0 #define ADF_DH895XCC_PMISC_BAR 1 @@ -30,6 +32,9 @@ #define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96 #define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12 +/* Clocks frequency */ +#define ADF_DH895X_AE_FREQ (933 * HZ_PER_MHZ) + /* FW names */ #define ADF_DH895XCC_FW "qat_895xcc.bin" #define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin" From 359b84f8db942ef46d24de8aa397790c3fae22e0 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 30 Jun 2023 19:03:57 +0200 Subject: [PATCH 026/149] crypto: qat - add heartbeat feature Under some circumstances, firmware in the QAT devices could become unresponsive. The Heartbeat feature provides a mechanism to detect unresponsive devices. The QAT FW periodically writes to memory a set of counters that allow to detect the liveness of a device. This patch adds logic to enable the reporting of those counters, analyze them and report if a device is alive or not. In particular this adds (1) heartbeat enabling, reading and detection logic (2) reporting of heartbeat status and configuration via debugfs (3) documentation for the newly created sysfs entries (4) configuration of FW settings related to heartbeat, e.g. tick period (5) logic to convert time in ms (provided by the user) to clock ticks This patch introduces a new folder in debugfs called heartbeat with the following attributes: - status - queries_sent - queries_failed - config All attributes except config are reading only. In particular: - `status` file returns 0 when device is operational and -1 otherwise. - `queries_sent` returns the total number of heartbeat queries sent. - `queries_failed` returns the total number of heartbeat queries failed. - `config` allows to adjust the frequency at which the firmware writes counters to memory. This period is given in milliseconds and it is fixed for GEN4 devices. 
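Since the driver does not poll on its own, a monitoring agent is expected to read these attributes periodically. A minimal user-space sketch of such a poller follows; the debugfs directory name is a placeholder (the real name encodes the device type and PCI address) and the 2-second interval simply needs to stay above the configured HeartbeatTimer.

#!/usr/bin/env python3
# Illustrative heartbeat poller; HB_DIR is a hypothetical path.
import sys
import time

HB_DIR = "/sys/kernel/debug/qat_4xxx_0000:6b:00.0/heartbeat"  # placeholder BDF

def read_attr(name):
    with open(f"{HB_DIR}/{name}") as f:
        return f.read().strip()

while True:
    status = read_attr("status")
    print(f"status={status} sent={read_attr('queries_sent')} "
          f"failed={read_attr('queries_failed')}")
    if status != "0":        # 0 = healthy, -1 = unresponsive or query failed
        sys.exit("device unresponsive")
    time.sleep(2)            # keep well above the configured HB timer period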
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-driver-qat | 51 ++++ .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 3 + .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1 + .../intel/qat/qat_c62x/adf_c62x_hw_data.c | 1 + drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 7 + .../crypto/intel/qat/qat_common/adf_admin.c | 14 + .../intel/qat/qat_common/adf_cfg_strings.h | 2 + .../intel/qat/qat_common/adf_common_drv.h | 1 + .../crypto/intel/qat/qat_common/adf_dbgfs.c | 9 +- .../intel/qat/qat_common/adf_gen2_config.c | 7 + .../intel/qat/qat_common/adf_gen2_hw_data.h | 3 + .../intel/qat/qat_common/adf_gen4_hw_data.h | 3 + .../intel/qat/qat_common/adf_heartbeat.c | 268 ++++++++++++++++++ .../intel/qat/qat_common/adf_heartbeat.h | 73 +++++ .../qat/qat_common/adf_heartbeat_dbgfs.c | 194 +++++++++++++ .../qat/qat_common/adf_heartbeat_dbgfs.h | 12 + .../crypto/intel/qat/qat_common/adf_init.c | 7 + .../qat/qat_common/icp_qat_fw_init_admin.h | 4 + .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1 + 21 files changed, 662 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_heartbeat.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_heartbeat.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index 22d39c0ca1b2..6731ffacc5f0 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -8,3 +8,54 @@ Description: (RO) Read returns the number of requests sent to the FW and the num : Number of requests sent from Acceleration Engine N to FW and responses Acceleration Engine N received from FW + +What: /sys/kernel/debug/qat__/heartbeat/config +Date: November 2023 +KernelVersion: 6.6 +Contact: qat-linux@intel.com +Description: (RW) Read returns value of the Heartbeat update period. + Write to the file changes this period value. + + This period should reflect planned polling interval of device + health status. High frequency Heartbeat monitoring wastes CPU cycles + but minimizes the customer’s system downtime. Also, if there are + large service requests that take some time to complete, high frequency + Heartbeat monitoring could result in false reports of unresponsiveness + and in those cases, period needs to be increased. + + This parameter is effective only for c3xxx, c62x, dh895xcc devices. + 4xxx has this value internally fixed to 200ms. + + Default value is set to 500. Minimal allowed value is 200. + All values are expressed in milliseconds. + +What: /sys/kernel/debug/qat__/heartbeat/queries_failed +Date: November 2023 +KernelVersion: 6.6 +Contact: qat-linux@intel.com +Description: (RO) Read returns the number of times the device became unresponsive. + + Attribute returns value of the counter which is incremented when + status query results negative. + +What: /sys/kernel/debug/qat__/heartbeat/queries_sent +Date: November 2023 +KernelVersion: 6.6 +Contact: qat-linux@intel.com +Description: (RO) Read returns the number of times the control process checked + if the device is responsive. + + Attribute returns value of the counter which is incremented on + every status query. 
+ +What: /sys/kernel/debug/qat__/heartbeat/status +Date: November 2023 +KernelVersion: 6.6 +Contact: qat-linux@intel.com +Description: (RO) Read returns the device health status. + + Returns 0 when device is healthy or -1 when is unresponsive + or the query failed to send. + + The driver does not monitor for Heartbeat. It is left for a user + to poll the status periodically. diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 9cda0c17992a..268a1f7694fc 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -521,6 +521,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->start_timer = adf_gen4_timer_start; hw_data->stop_timer = adf_gen4_timer_stop; hw_data->get_hb_clock = get_heartbeat_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 1a15600361d0..6d4e2e139ffa 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "adf_4xxx_hw_data.h" #include "qat_compression.h" @@ -77,6 +78,8 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev) if (ret) return ret; + adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); + return 0; } diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 2c6a1dd9780c..e81d11409426 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -152,6 +152,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->dev_config = adf_gen2_dev_config; hw_data->measure_clock = measure_clock; hw_data->get_hb_clock = get_ts_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 081702be839e..1a8c8e3a48e9 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -154,6 +154,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->dev_config = adf_gen2_dev_config; hw_data->measure_clock = measure_clock; hw_data->get_hb_clock = get_ts_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index b3e32d2abf45..43622c7fca71 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -31,6 +31,8 @@ intel_qat-objs := adf_cfg.o \ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ + adf_heartbeat.o \ + adf_heartbeat_dbgfs.o \ adf_dbgfs.o intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index c96f59cb7de9..ab897e1717e0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ 
b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -233,6 +233,7 @@ struct adf_hw_device_data { u8 num_accel; u8 num_logical_accel; u8 num_engines; + u32 num_hb_ctrs; }; /* CSR write macro */ @@ -245,6 +246,11 @@ struct adf_hw_device_data { #define ADF_CFG_NUM_SERVICES 4 #define ADF_SRV_TYPE_BIT_LEN 3 #define ADF_SRV_TYPE_MASK 0x7 +#define ADF_AE_ADMIN_THREAD 7 +#define ADF_NUM_THREADS_PER_AE 8 +#define ADF_NUM_PKE_STRAND 2 +#define ADF_AE_STRAND0_THREAD 8 +#define ADF_AE_STRAND1_THREAD 9 #define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev) #define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars) @@ -301,6 +307,7 @@ struct adf_accel_dev { struct module *owner; struct adf_accel_pci accel_pci_dev; struct adf_timer *timer; + struct adf_heartbeat *heartbeat; union { struct { /* protects VF2PF interrupts access */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 966e7ea271a2..ff790823b868 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -8,6 +8,7 @@ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" +#include "adf_heartbeat.h" #include "icp_qat_fw_init_admin.h" #define ADF_ADMIN_MAILBOX_STRIDE 0x1000 @@ -270,6 +271,19 @@ int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks) +{ + u32 ae_mask = accel_dev->hw_device->ae_mask; + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + + req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET; + req.init_cfg_ptr = accel_dev->heartbeat->dma.phy_addr; + req.heartbeat_ticks = ticks; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. 
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index 3ae1e5caee0e..6066dc637352 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -47,4 +47,6 @@ #define ADF_ETRMGR_CORE_AFFINITY_FORMAT \ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY #define ADF_ACCEL_STR "Accelerator%d" +#define ADF_HEARTBEAT_TIMER "HeartbeatTimer" + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index a9552832858f..799a2193d3e5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -97,6 +97,7 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev); int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); +int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 5080ecffab03..04845f8d72be 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -7,6 +7,7 @@ #include "adf_common_drv.h" #include "adf_dbgfs.h" #include "adf_fw_counters.h" +#include "adf_heartbeat_dbgfs.h" /** * adf_dbgfs_init() - add persistent debugfs entries @@ -58,8 +59,10 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) if (!accel_dev->debugfs_dir) return; - if (!accel_dev->is_vf) + if (!accel_dev->is_vf) { adf_fw_counters_dbgfs_add(accel_dev); + adf_heartbeat_dbgfs_add(accel_dev); + } } /** @@ -71,6 +74,8 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) if (!accel_dev->debugfs_dir) return; - if (!accel_dev->is_vf) + if (!accel_dev->is_vf) { + adf_heartbeat_dbgfs_rm(accel_dev); adf_fw_counters_dbgfs_rm(accel_dev); + } } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c index eeb30da7587a..c27ff6d18e11 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c @@ -7,6 +7,7 @@ #include "adf_common_drv.h" #include "qat_crypto.h" #include "qat_compression.h" +#include "adf_heartbeat.h" #include "adf_transport_access_macros.h" static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev) @@ -195,6 +196,12 @@ int adf_gen2_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; + ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); + if (ret) + goto err; + + adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_DEFAULT_MS); + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); return ret; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h index e4bc07529be4..6bd341061de4 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h @@ -145,6 +145,9 @@ do { \ #define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10) #define ADF_GEN2_ERRSSMSH_EN BIT(3) +/* Number of heartbeat counter pairs */ +#define 
ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE + /* Interrupts */ #define ADF_GEN2_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) #define ADF_GEN2_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 4fb4b3df5a18..02d7a019ebf8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -136,6 +136,9 @@ do { \ #define ADF_GEN4_VFLNOTIFY BIT(7) +/* Number of heartbeat counter pairs */ +#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c new file mode 100644 index 000000000000..7358aac8e56d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_cfg_strings.h" +#include "adf_clock.h" +#include "adf_common_drv.h" +#include "adf_heartbeat.h" +#include "adf_transport_internal.h" +#include "icp_qat_fw_init_admin.h" + +/* Heartbeat counter pair */ +struct hb_cnt_pair { + __u16 resp_heartbeat_cnt; + __u16 req_heartbeat_cnt; +}; + +static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev) +{ + u64 curr_time = adf_clock_get_current_time(); + u64 polling_time = curr_time - accel_dev->heartbeat->last_hb_check_time; + + if (polling_time < accel_dev->heartbeat->hb_timer) { + dev_warn(&GET_DEV(accel_dev), + "HB polling too frequent. 
Configured HB timer %d ms\n", + accel_dev->heartbeat->hb_timer); + return -EINVAL; + } + + accel_dev->heartbeat->last_hb_check_time = curr_time; + return 0; +} + +static int get_timer_ticks(struct adf_accel_dev *accel_dev, unsigned int *value) +{ + char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; + u32 timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS; + int cfg_read_status; + u32 ticks; + int ret; + + cfg_read_status = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_HEARTBEAT_TIMER, timer_str); + if (cfg_read_status == 0) { + if (kstrtouint(timer_str, 10, &timer_ms)) + dev_dbg(&GET_DEV(accel_dev), + "kstrtouint failed to parse the %s, param value", + ADF_HEARTBEAT_TIMER); + } + + if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) { + dev_err(&GET_DEV(accel_dev), "Timer cannot be less than %u\n", + ADF_CFG_HB_TIMER_MIN_MS); + return -EINVAL; + } + + /* + * On 4xxx devices adf_timer is responsible for HB updates and + * its period is fixed to 200ms + */ + if (accel_dev->timer) + timer_ms = ADF_CFG_HB_TIMER_MIN_MS; + + ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks); + if (ret) + return ret; + + adf_heartbeat_save_cfg_param(accel_dev, timer_ms); + + accel_dev->heartbeat->hb_timer = timer_ms; + *value = ticks; + + return 0; +} + +static int check_ae(struct hb_cnt_pair *curr, struct hb_cnt_pair *prev, + u16 *count, const size_t hb_ctrs) +{ + size_t thr; + + /* loop through all threads in AE */ + for (thr = 0; thr < hb_ctrs; thr++) { + u16 req = curr[thr].req_heartbeat_cnt; + u16 resp = curr[thr].resp_heartbeat_cnt; + u16 last = prev[thr].resp_heartbeat_cnt; + + if ((thr == ADF_AE_ADMIN_THREAD || req != resp) && resp == last) { + u16 retry = ++count[thr]; + + if (retry >= ADF_CFG_HB_COUNT_THRESHOLD) + return -EIO; + + } else { + count[thr] = 0; + } + } + return 0; +} + +static int adf_hb_get_status(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct hb_cnt_pair *live_stats, *last_stats, *curr_stats; + const size_t hb_ctrs = hw_device->num_hb_ctrs; + const unsigned long ae_mask = hw_device->ae_mask; + const size_t max_aes = hw_device->num_engines; + const size_t dev_ctrs = size_mul(max_aes, hb_ctrs); + const size_t stats_size = size_mul(dev_ctrs, sizeof(*curr_stats)); + struct hb_cnt_pair *ae_curr_p, *ae_prev_p; + u16 *count_fails, *ae_count_p; + size_t ae_offset; + size_t ae = 0; + int ret = 0; + + live_stats = accel_dev->heartbeat->dma.virt_addr; + last_stats = live_stats + dev_ctrs; + count_fails = (u16 *)(last_stats + dev_ctrs); + + curr_stats = kmemdup(live_stats, stats_size, GFP_KERNEL); + if (!curr_stats) + return -ENOMEM; + + /* loop through active AEs */ + for_each_set_bit(ae, &ae_mask, max_aes) { + ae_offset = size_mul(ae, hb_ctrs); + ae_curr_p = curr_stats + ae_offset; + ae_prev_p = last_stats + ae_offset; + ae_count_p = count_fails + ae_offset; + + ret = check_ae(ae_curr_p, ae_prev_p, ae_count_p, hb_ctrs); + if (ret) + break; + } + + /* Copy current stats for the next iteration */ + memcpy(last_stats, curr_stats, stats_size); + kfree(curr_stats); + + return ret; +} + +void adf_heartbeat_status(struct adf_accel_dev *accel_dev, + enum adf_device_heartbeat_status *hb_status) +{ + struct adf_heartbeat *hb; + + if (!adf_dev_started(accel_dev) || + test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { + *hb_status = HB_DEV_UNRESPONSIVE; + return; + } + + if (adf_hb_check_polling_freq(accel_dev) == -EINVAL) { + *hb_status = HB_DEV_UNSUPPORTED; + return; + } + + hb = accel_dev->heartbeat; + hb->hb_sent_counter++; + + if 
(adf_hb_get_status(accel_dev)) { + dev_err(&GET_DEV(accel_dev), + "Heartbeat ERROR: QAT is not responding.\n"); + *hb_status = HB_DEV_UNRESPONSIVE; + hb->hb_failed_counter++; + return; + } + + *hb_status = HB_DEV_ALIVE; +} + +int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms, + u32 *value) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 clk_per_sec; + + /* HB clock may be different than AE clock */ + if (!hw_data->get_hb_clock) + return -EINVAL; + + clk_per_sec = hw_data->get_hb_clock(hw_data); + *value = time_ms * (clk_per_sec / MSEC_PER_SEC); + + return 0; +} + +int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev, + unsigned int timer_ms) +{ + char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + + snprintf(timer_str, sizeof(timer_str), "%u", timer_ms); + return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_HEARTBEAT_TIMER, timer_str, + ADF_STR); +} +EXPORT_SYMBOL_GPL(adf_heartbeat_save_cfg_param); + +int adf_heartbeat_init(struct adf_accel_dev *accel_dev) +{ + struct adf_heartbeat *hb; + + hb = kzalloc(sizeof(*hb), GFP_KERNEL); + if (!hb) + goto err_ret; + + hb->dma.virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + &hb->dma.phy_addr, GFP_KERNEL); + if (!hb->dma.virt_addr) + goto err_free; + + accel_dev->heartbeat = hb; + + return 0; + +err_free: + kfree(hb); +err_ret: + return -ENOMEM; +} + +int adf_heartbeat_start(struct adf_accel_dev *accel_dev) +{ + unsigned int timer_ticks; + int ret; + + if (!accel_dev->heartbeat) { + dev_warn(&GET_DEV(accel_dev), "Heartbeat instance not found!"); + return -EFAULT; + } + + ret = get_timer_ticks(accel_dev, &timer_ticks); + if (ret) + return ret; + + ret = adf_send_admin_hb_timer(accel_dev, timer_ticks); + if (ret) + dev_warn(&GET_DEV(accel_dev), "Heartbeat not supported!"); + + return ret; +} + +void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev) +{ + struct adf_heartbeat *hb = accel_dev->heartbeat; + + if (!hb) + return; + + if (hb->dma.virt_addr) + dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + hb->dma.virt_addr, hb->dma.phy_addr); + + kfree(hb); + accel_dev->heartbeat = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h new file mode 100644 index 000000000000..297147f44150 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_HEARTBEAT_H_ +#define ADF_HEARTBEAT_H_ + +#include + +struct adf_accel_dev; +struct dentry; + +#define ADF_CFG_HB_TIMER_MIN_MS 200 +#define ADF_CFG_HB_TIMER_DEFAULT_MS 500 +#define ADF_CFG_HB_COUNT_THRESHOLD 3 + +enum adf_device_heartbeat_status { + HB_DEV_UNRESPONSIVE = 0, + HB_DEV_ALIVE, + HB_DEV_UNSUPPORTED, +}; + +struct adf_heartbeat { + unsigned int hb_sent_counter; + unsigned int hb_failed_counter; + unsigned int hb_timer; + u64 last_hb_check_time; + struct hb_dma_addr { + dma_addr_t phy_addr; + void *virt_addr; + } dma; + struct { + struct dentry *base_dir; + struct dentry *status; + struct dentry *cfg; + struct dentry *sent; + struct dentry *failed; + } dbgfs; +}; + +#ifdef CONFIG_DEBUG_FS +int adf_heartbeat_init(struct adf_accel_dev *accel_dev); +int adf_heartbeat_start(struct adf_accel_dev *accel_dev); +void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev); + +int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms, + uint32_t *value); +int 
adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev, + unsigned int timer_ms); +void adf_heartbeat_status(struct adf_accel_dev *accel_dev, + enum adf_device_heartbeat_status *hb_status); + +#else +static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline int adf_heartbeat_start(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev) +{ +} + +static inline int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev, + unsigned int timer_ms) +{ + return 0; +} +#endif +#endif /* ADF_HEARTBEAT_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c new file mode 100644 index 000000000000..803cbfd838f0 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include "adf_cfg.h" +#include "adf_common_drv.h" +#include "adf_heartbeat.h" +#include "adf_heartbeat_dbgfs.h" + +#define HB_OK 0 +#define HB_ERROR -1 +#define HB_STATUS_MAX_STRLEN 4 +#define HB_STATS_MAX_STRLEN 16 + +static ssize_t adf_hb_stats_read(struct file *file, char __user *user_buffer, + size_t count, loff_t *ppos) +{ + char buf[HB_STATS_MAX_STRLEN]; + unsigned int *value; + int len; + + if (*ppos > 0) + return 0; + + value = file->private_data; + len = scnprintf(buf, sizeof(buf), "%u\n", *value); + + return simple_read_from_buffer(user_buffer, count, ppos, buf, len + 1); +} + +static const struct file_operations adf_hb_stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = adf_hb_stats_read, +}; + +static ssize_t adf_hb_status_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + enum adf_device_heartbeat_status hb_status; + char ret_str[HB_STATUS_MAX_STRLEN]; + struct adf_accel_dev *accel_dev; + int ret_code; + size_t len; + + if (*ppos > 0) + return 0; + + accel_dev = file->private_data; + ret_code = HB_OK; + + adf_heartbeat_status(accel_dev, &hb_status); + + if (hb_status != HB_DEV_ALIVE) + ret_code = HB_ERROR; + + len = scnprintf(ret_str, sizeof(ret_str), "%d\n", ret_code); + + return simple_read_from_buffer(user_buf, count, ppos, ret_str, len + 1); +} + +static const struct file_operations adf_hb_status_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = adf_hb_status_read, +}; + +static ssize_t adf_hb_cfg_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + struct adf_accel_dev *accel_dev; + unsigned int timer_ms; + int len; + + if (*ppos > 0) + return 0; + + accel_dev = file->private_data; + timer_ms = accel_dev->heartbeat->hb_timer; + len = scnprintf(timer_str, sizeof(timer_str), "%u\n", timer_ms); + + return simple_read_from_buffer(user_buf, count, ppos, timer_str, + len + 1); +} + +static ssize_t adf_hb_cfg_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char input_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; + struct adf_accel_dev *accel_dev; + int ret, written_chars; + unsigned int timer_ms; + u32 ticks; + + accel_dev = file->private_data; + timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS; + + /* last byte left as string termination */ + if (count > sizeof(input_str) - 1) + return -EINVAL; + + written_chars = simple_write_to_buffer(input_str, 
sizeof(input_str) - 1, + ppos, user_buf, count); + if (written_chars > 0) { + ret = kstrtouint(input_str, 10, &timer_ms); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "heartbeat_cfg: Invalid value\n"); + return ret; + } + + if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) { + dev_err(&GET_DEV(accel_dev), + "heartbeat_cfg: Invalid value\n"); + return -EINVAL; + } + + /* + * On 4xxx devices adf_timer is responsible for HB updates and + * its period is fixed to 200ms + */ + if (accel_dev->timer) + timer_ms = ADF_CFG_HB_TIMER_MIN_MS; + + ret = adf_heartbeat_save_cfg_param(accel_dev, timer_ms); + if (ret) + return ret; + + ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks); + if (ret) + return ret; + + ret = adf_send_admin_hb_timer(accel_dev, ticks); + if (ret) + return ret; + + accel_dev->heartbeat->hb_timer = timer_ms; + } + + return written_chars; +} + +static const struct file_operations adf_hb_cfg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = adf_hb_cfg_read, + .write = adf_hb_cfg_write, +}; + +void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_heartbeat *hb = accel_dev->heartbeat; + + if (!hb) + return; + + hb->dbgfs.base_dir = debugfs_create_dir("heartbeat", accel_dev->debugfs_dir); + hb->dbgfs.status = debugfs_create_file("status", 0400, hb->dbgfs.base_dir, + accel_dev, &adf_hb_status_fops); + hb->dbgfs.sent = debugfs_create_file("queries_sent", 0400, hb->dbgfs.base_dir, + &hb->hb_sent_counter, &adf_hb_stats_fops); + hb->dbgfs.failed = debugfs_create_file("queries_failed", 0400, hb->dbgfs.base_dir, + &hb->hb_failed_counter, &adf_hb_stats_fops); + hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir, + accel_dev, &adf_hb_cfg_fops); +} +EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add); + +void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_heartbeat *hb = accel_dev->heartbeat; + + if (!hb) + return; + + debugfs_remove(hb->dbgfs.status); + hb->dbgfs.status = NULL; + debugfs_remove(hb->dbgfs.sent); + hb->dbgfs.sent = NULL; + debugfs_remove(hb->dbgfs.failed); + hb->dbgfs.failed = NULL; + debugfs_remove(hb->dbgfs.cfg); + hb->dbgfs.cfg = NULL; + debugfs_remove(hb->dbgfs.base_dir); + hb->dbgfs.base_dir = NULL; +} +EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_rm); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h new file mode 100644 index 000000000000..84dd29ea6454 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_HEARTBEAT_DBGFS_H_ +#define ADF_HEARTBEAT_DBGFS_H_ + +struct adf_accel_dev; + +void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev); +void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_HEARTBEAT_DBGFS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 53fca6a7e2af..89001fe92e76 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -8,6 +8,7 @@ #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_dbgfs.h" +#include "adf_heartbeat.h" static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); @@ -129,6 +130,8 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) return -EFAULT; } + adf_heartbeat_init(accel_dev); + /* * Subservice initialisation is divided into two stages: init and start. 
* This is to facilitate any ordering dependencies between services @@ -204,6 +207,8 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) } } + adf_heartbeat_start(accel_dev); + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_START)) { @@ -347,6 +352,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) clear_bit(accel_dev->accel_id, service->init_status); } + adf_heartbeat_shutdown(accel_dev); + hw_data->disable_iov(accel_dev); if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index e836fbaeec62..3e968a4bcc9c 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, + ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_PM_STATE_CONFIG = 128, }; @@ -41,6 +42,9 @@ struct icp_qat_fw_init_admin_req { struct { __u32 int_timer_ticks; }; + struct { + __u32 heartbeat_ticks; + }; __u32 idle_filter; }; diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index c57403a6fbac..8fbab905c5cc 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -247,6 +247,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) hw_data->dev_config = adf_gen2_dev_config; hw_data->clock_frequency = ADF_DH895X_AE_FREQ; hw_data->get_hb_clock = get_ts_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts; From bec61a294dbec1c5929aa6f138cddcab68a62311 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 30 Jun 2023 19:03:58 +0200 Subject: [PATCH 027/149] crypto: qat - add heartbeat counters check A firmware update for QAT GEN2 changed the format of a data structure used to report the heartbeat counters. To support all firmware versions, extend the heartbeat logic with an algorithm that detects the number of counters returned by firmware. The algorithm detects the number of counters to be used (and size of the corresponding data structure) by the comparison the expected size of the data in memory, with the data which was written by the firmware. Firmware detection is done one time during the first read of heartbeat debugfs file to avoid increasing the time needed to load the module. 
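To make the detection step easier to follow, here is a minimal sketch of the idea (illustrative only; hb_extra_ctrs_present() and EMPTY_SIG are placeholder names, not the driver's identifiers): the counter area is pre-filled with a known pattern, and on the first status read the number of 32-bit words the firmware has overwritten is compared with the number expected for the additional PKE strand counters.

    static bool hb_extra_ctrs_present(const u32 *mem, u32 pattern_words,
                                      u32 expected_extra_words)
    {
            u32 written = 0;

            /* count the words the firmware overwrote before the pattern */
            while (written < pattern_words && mem[written] != EMPTY_SIG)
                    written++;

            /* non-zero and exactly the expected amount: larger format */
            return written && written == expected_extra_words;
    }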
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu --- .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 2 + .../intel/qat/qat_c62x/adf_c62x_hw_data.c | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../intel/qat/qat_common/adf_heartbeat.c | 68 +++++++++++++++++++ .../intel/qat/qat_common/adf_heartbeat.h | 6 ++ .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 2 + 6 files changed, 81 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index e81d11409426..9c00c441b602 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -8,6 +8,7 @@ #include #include #include "adf_c3xxx_hw_data.h" +#include "adf_heartbeat.h" #include "icp_qat_hw.h" /* Worker thread to service arbiter mappings */ @@ -153,6 +154,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->measure_clock = measure_clock; hw_data->get_hb_clock = get_ts_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 1a8c8e3a48e9..355a781693eb 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -8,6 +8,7 @@ #include #include #include "adf_c62x_hw_data.h" +#include "adf_heartbeat.h" #include "icp_qat_hw.h" /* Worker thread to service arbiter mappings */ @@ -155,6 +156,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->measure_clock = measure_clock; hw_data->get_hb_clock = get_ts_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index ab897e1717e0..e57abde66f4f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -190,6 +190,7 @@ struct adf_hw_device_data { int (*send_admin_init)(struct adf_accel_dev *accel_dev); int (*start_timer)(struct adf_accel_dev *accel_dev); void (*stop_timer)(struct adf_accel_dev *accel_dev); + void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev); uint32_t (*get_hb_clock)(struct adf_hw_device_data *self); int (*measure_clock)(struct adf_accel_dev *accel_dev); int (*init_arb)(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index 7358aac8e56d..beef9a5f6c75 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -20,6 +20,8 @@ #include "adf_transport_internal.h" #include "icp_qat_fw_init_admin.h" +#define ADF_HB_EMPTY_SIG 0xA5A5A5A5 + /* Heartbeat counter pair */ struct hb_cnt_pair { __u16 resp_heartbeat_cnt; @@ -42,6 +44,57 @@ static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev) return 0; } +/** + * validate_hb_ctrs_cnt() - checks if the number of heartbeat counters should + * be updated by one to support the currently loaded firmware. 
+ * @accel_dev: Pointer to acceleration device. + * + * Return: + * * true - hb_ctrs must increased by ADF_NUM_PKE_STRAND + * * false - no changes needed + */ +static bool validate_hb_ctrs_cnt(struct adf_accel_dev *accel_dev) +{ + const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs; + const size_t max_aes = accel_dev->hw_device->num_engines; + const size_t hb_struct_size = sizeof(struct hb_cnt_pair); + const size_t exp_diff_size = array3_size(ADF_NUM_PKE_STRAND, max_aes, + hb_struct_size); + const size_t dev_ctrs = size_mul(max_aes, hb_ctrs); + const size_t stats_size = size_mul(dev_ctrs, hb_struct_size); + const u32 exp_diff_cnt = exp_diff_size / sizeof(u32); + const u32 stats_el_cnt = stats_size / sizeof(u32); + struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr; + const u32 *mem_to_chk = (u32 *)(hb_stats + dev_ctrs); + u32 el_diff_cnt = 0; + int i; + + /* count how many bytes are different from pattern */ + for (i = 0; i < stats_el_cnt; i++) { + if (mem_to_chk[i] == ADF_HB_EMPTY_SIG) + break; + + el_diff_cnt++; + } + + return el_diff_cnt && el_diff_cnt == exp_diff_cnt; +} + +void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev) +{ + struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr; + const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs; + const size_t max_aes = accel_dev->hw_device->num_engines; + const size_t dev_ctrs = size_mul(max_aes, hb_ctrs); + const size_t stats_size = size_mul(dev_ctrs, sizeof(struct hb_cnt_pair)); + const size_t mem_items_to_fill = size_mul(stats_size, 2) / sizeof(u32); + + /* fill hb stats memory with pattern */ + memset32((uint32_t *)hb_stats, ADF_HB_EMPTY_SIG, mem_items_to_fill); + accel_dev->heartbeat->ctrs_cnt_checked = false; +} +EXPORT_SYMBOL_GPL(adf_heartbeat_check_ctrs); + static int get_timer_ticks(struct adf_accel_dev *accel_dev, unsigned int *value) { char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; @@ -123,6 +176,13 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev) size_t ae = 0; int ret = 0; + if (!accel_dev->heartbeat->ctrs_cnt_checked) { + if (validate_hb_ctrs_cnt(accel_dev)) + hw_device->num_hb_ctrs += ADF_NUM_PKE_STRAND; + + accel_dev->heartbeat->ctrs_cnt_checked = true; + } + live_stats = accel_dev->heartbeat->dma.virt_addr; last_stats = live_stats + dev_ctrs; count_fails = (u16 *)(last_stats + dev_ctrs); @@ -221,6 +281,11 @@ int adf_heartbeat_init(struct adf_accel_dev *accel_dev) if (!hb->dma.virt_addr) goto err_free; + /* + * Default set this flag as true to avoid unnecessary checks, + * it will be reset on platforms that need such a check + */ + hb->ctrs_cnt_checked = true; accel_dev->heartbeat = hb; return 0; @@ -241,6 +306,9 @@ int adf_heartbeat_start(struct adf_accel_dev *accel_dev) return -EFAULT; } + if (accel_dev->hw_device->check_hb_ctrs) + accel_dev->hw_device->check_hb_ctrs(accel_dev); + ret = get_timer_ticks(accel_dev, &timer_ticks); if (ret) return ret; diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h index 297147f44150..b22e3cb29798 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h @@ -24,6 +24,7 @@ struct adf_heartbeat { unsigned int hb_failed_counter; unsigned int hb_timer; u64 last_hb_check_time; + bool ctrs_cnt_checked; struct hb_dma_addr { dma_addr_t phy_addr; void *virt_addr; @@ -48,6 +49,7 @@ int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev, unsigned int timer_ms); void 
adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status); +void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev); #else static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev) @@ -69,5 +71,9 @@ static inline int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev, { return 0; } + +static inline void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev) +{ +} #endif #endif /* ADF_HEARTBEAT_H_ */ diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 8fbab905c5cc..09551f949126 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -7,6 +7,7 @@ #include #include #include "adf_dh895xcc_hw_data.h" +#include "adf_heartbeat.h" #include "icp_qat_hw.h" #define ADF_DH895XCC_VF_MSK 0xFFFFFFFF @@ -248,6 +249,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) hw_data->clock_frequency = ADF_DH895X_AE_FREQ; hw_data->get_hb_clock = get_ts_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts; From 039980de89dc9dd757418d6f296e4126cc3f86c3 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Sun, 2 Jul 2023 19:35:02 +0200 Subject: [PATCH 028/149] hwrng: nomadik - keep clock enabled while hwrng is registered The nomadik driver uses devres to register itself with the hwrng core, the driver will be unregistered from hwrng when its device goes out of scope. This happens after the driver's remove function is called. However, nomadik's clock is disabled in the remove function. There's a short timeframe where nomadik is still registered with the hwrng core although its clock is disabled. I suppose the clock must be active to access the hardware and serve requests from the hwrng core. Switch to devm_clk_get_enabled and let devres disable the clock and unregister the hwrng. This avoids the race condition. 
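A minimal sketch of the resulting pattern (generic illustration of devm_clk_get_enabled(); variable names mirror the driver):

    rng_clk = devm_clk_get_enabled(&dev->dev, NULL); /* get + prepare + enable */
    if (IS_ERR(rng_clk))
            return PTR_ERR(rng_clk);

    /*
     * devres releases resources in reverse order of allocation, so the
     * clock acquired here is only disabled after later devres entries,
     * including the managed hwrng registration, have been torn down.
     */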
Fixes: 3e75241be808 ("hwrng: drivers - Use device-managed registration API") Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/nomadik-rng.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index e8f9621e7954..3774adf903a8 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -13,8 +13,6 @@ #include #include -static struct clk *rng_clk; - static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { void __iomem *base = (void __iomem *)rng->priv; @@ -36,21 +34,20 @@ static struct hwrng nmk_rng = { static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) { + struct clk *rng_clk; void __iomem *base; int ret; - rng_clk = devm_clk_get(&dev->dev, NULL); + rng_clk = devm_clk_get_enabled(&dev->dev, NULL); if (IS_ERR(rng_clk)) { dev_err(&dev->dev, "could not get rng clock\n"); ret = PTR_ERR(rng_clk); return ret; } - clk_prepare_enable(rng_clk); - ret = amba_request_regions(dev, dev->dev.init_name); if (ret) - goto out_clk; + return ret; ret = -ENOMEM; base = devm_ioremap(&dev->dev, dev->res.start, resource_size(&dev->res)); @@ -64,15 +61,12 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) out_release: amba_release_regions(dev); -out_clk: - clk_disable_unprepare(rng_clk); return ret; } static void nmk_rng_remove(struct amba_device *dev) { amba_release_regions(dev); - clk_disable_unprepare(rng_clk); } static const struct amba_id nmk_rng_ids[] = { From 6a52ee38c79825ef469ab3ed8588d17e90d2049c Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Sun, 2 Jul 2023 19:35:03 +0200 Subject: [PATCH 029/149] hwrng: nomadik - use dev_err_probe Use dev_err_probe to print a message and return an error. This makes the code a tiny bit shorter. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/nomadik-rng.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index 3774adf903a8..8c6a40d6ce3d 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -39,11 +39,8 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) int ret; rng_clk = devm_clk_get_enabled(&dev->dev, NULL); - if (IS_ERR(rng_clk)) { - dev_err(&dev->dev, "could not get rng clock\n"); - ret = PTR_ERR(rng_clk); - return ret; - } + if (IS_ERR(rng_clk)) + return dev_err_probe(&dev->dev, PTR_ERR(rng_clk), "could not get rng clock\n"); ret = amba_request_regions(dev, dev->dev.init_name); if (ret) From 8690b09c30b274691bc5bb7b1332a27a7cbe5db7 Mon Sep 17 00:00:00 2001 From: You Kangren Date: Tue, 4 Jul 2023 20:45:32 +0800 Subject: [PATCH 030/149] crypto: qat - replace the if statement with min() Mark UWORD_CPYBUF_SIZE with U suffix to make its type the same with words_num. Then replace the if statement with min() in qat_uclo_wr_uimage_raw_page() to make code shorter. 
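Sketched, the transformation is as follows (the kernel's min() macro type-checks its arguments, which is why the constant needs the U suffix to match the unsigned words_num):

    /* before */
    if (words_num < UWORD_CPYBUF_SIZE)
            cpylen = words_num;
    else
            cpylen = UWORD_CPYBUF_SIZE;

    /* after */
    cpylen = min(words_num, UWORD_CPYBUF_SIZE);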
Reviewed-by: Andy Shevchenko Signed-off-by: You Kangren Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index ce837bcc1cab..4bd150d1441a 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -11,7 +11,7 @@ #include "icp_qat_hal.h" #include "icp_qat_fw_loader_handle.h" -#define UWORD_CPYBUF_SIZE 1024 +#define UWORD_CPYBUF_SIZE 1024U #define INVLD_UWORD 0xffffffffffull #define PID_MINOR_REV 0xf #define PID_MAJOR_REV (0xf << 4) @@ -1986,10 +1986,7 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, uw_relative_addr = 0; words_num = encap_page->micro_words_num; while (words_num) { - if (words_num < UWORD_CPYBUF_SIZE) - cpylen = words_num; - else - cpylen = UWORD_CPYBUF_SIZE; + cpylen = min(words_num, UWORD_CPYBUF_SIZE); /* load the buffer */ for (i = 0; i < cpylen; i++) From ea6084559285f4436434630a0e0d9e82709a04f2 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Tue, 4 Jul 2023 19:06:44 +0200 Subject: [PATCH 031/149] hwrng: imx-rngc - use dev_err_probe Simplify the code by calling dev_err_probe instead of dev_err and return. While at it, use the same device for all error messages. Signed-off-by: Martin Kaiser Reviewed-by: Rouven Czerwinski Reviewed-by: Rouven Czerwinski Signed-off-by: Herbert Xu --- drivers/char/hw_random/imx-rngc.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index bf07f17f78c8..e4b385b01b11 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -239,10 +239,8 @@ static int __init imx_rngc_probe(struct platform_device *pdev) return PTR_ERR(rngc->base); rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL); - if (IS_ERR(rngc->clk)) { - dev_err(&pdev->dev, "Can not get rng_clk\n"); - return PTR_ERR(rngc->clk); - } + if (IS_ERR(rngc->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n"); irq = platform_get_irq(pdev, 0); if (irq < 0) @@ -272,24 +270,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); - if (ret) { - dev_err(rngc->dev, "Can't get interrupt working.\n"); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n"); if (self_test) { ret = imx_rngc_self_test(rngc); - if (ret) { - dev_err(rngc->dev, "self test failed\n"); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "self test failed\n"); } ret = devm_hwrng_register(&pdev->dev, &rngc->rng); - if (ret) { - dev_err(&pdev->dev, "hwrng registration failed\n"); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n"); dev_info(&pdev->dev, "Freescale RNG%c registered (HW revision %d.%02d)\n", From b4198a9a538c330cdff7239f8aba0792d31fab42 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Tue, 4 Jul 2023 19:10:51 +0200 Subject: [PATCH 032/149] hwrng: exynos - switch to DEFINE_SIMPLE_DEV_PM_OPS SIMPLE_DEV_PM_OPS is deprecated, replace it with DEFINE_SIMPLE_DEV_PM_OPS and use pm_sleep_ptr for setting the driver's pm routines. We can now remove the __maybe_unused qualifier in the suspend and resume functions. 
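The resulting shape, sketched with placeholder driver names: the ops structure is defined unconditionally, and pm_sleep_ptr() resolves to NULL when CONFIG_PM_SLEEP is disabled while still keeping a compile-time reference to the callbacks, which is why __maybe_unused becomes unnecessary.

    static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm   = pm_sleep_ptr(&foo_pm_ops),
            },
    };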
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/exynos-trng.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c index 9cc3d542dd0f..30207b7ac5f4 100644 --- a/drivers/char/hw_random/exynos-trng.c +++ b/drivers/char/hw_random/exynos-trng.c @@ -185,14 +185,14 @@ static int exynos_trng_remove(struct platform_device *pdev) return 0; } -static int __maybe_unused exynos_trng_suspend(struct device *dev) +static int exynos_trng_suspend(struct device *dev) { pm_runtime_put_sync(dev); return 0; } -static int __maybe_unused exynos_trng_resume(struct device *dev) +static int exynos_trng_resume(struct device *dev) { int ret; @@ -205,7 +205,7 @@ static int __maybe_unused exynos_trng_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend, +static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend, exynos_trng_resume); static const struct of_device_id exynos_trng_dt_match[] = { @@ -219,7 +219,7 @@ MODULE_DEVICE_TABLE(of, exynos_trng_dt_match); static struct platform_driver exynos_trng_driver = { .driver = { .name = "exynos-trng", - .pm = &exynos_trng_pm_ops, + .pm = pm_sleep_ptr(&exynos_trng_pm_ops), .of_match_table = exynos_trng_dt_match, }, .probe = exynos_trng_probe, From b157d50bd2de501023391bab81f6198153398b2d Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Tue, 4 Jul 2023 19:32:00 +0200 Subject: [PATCH 033/149] hwrng: pic32 - enable compile-testing Enable compile testing for the pic32 driver. Remove the dependency on HW_RANDOM. The pic32 config section is under "if HW_RANDOM". Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index e0b3786ca51b..1aeba12391a1 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -385,7 +385,7 @@ config HW_RANDOM_STM32 config HW_RANDOM_PIC32 tristate "Microchip PIC32 Random Number Generator support" - depends on HW_RANDOM && MACH_PIC32 + depends on MACH_PIC32 || COMPILE_TEST default y help This driver provides kernel-side support for the Random Number From 6755ad74aac0fb1c79b14724feb81b2f6ff25847 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Tue, 4 Jul 2023 19:32:01 +0200 Subject: [PATCH 034/149] hwrng: pic32 - use devm_clk_get_enabled Use devm_clk_get_enabled in the pic32 driver. Ensure that the clock is enabled as long as the driver is registered with the hwrng core. 
Fixes: 7ea39973d1e5 ("hwrng: pic32 - Use device-managed registration API") Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/pic32-rng.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index 99c8bd0859a1..e04a054e8930 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -36,7 +36,6 @@ struct pic32_rng { void __iomem *base; struct hwrng rng; - struct clk *clk; }; /* @@ -70,6 +69,7 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max, static int pic32_rng_probe(struct platform_device *pdev) { struct pic32_rng *priv; + struct clk *clk; u32 v; int ret; @@ -81,13 +81,9 @@ static int pic32_rng_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - - ret = clk_prepare_enable(priv->clk); - if (ret) - return ret; + clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); /* enable TRNG in enhanced mode */ v = TRNGEN | TRNGMOD; @@ -98,15 +94,11 @@ static int pic32_rng_probe(struct platform_device *pdev) ret = devm_hwrng_register(&pdev->dev, &priv->rng); if (ret) - goto err_register; + return ret; platform_set_drvdata(pdev, priv); return 0; - -err_register: - clk_disable_unprepare(priv->clk); - return ret; } static int pic32_rng_remove(struct platform_device *pdev) @@ -114,7 +106,6 @@ static int pic32_rng_remove(struct platform_device *pdev) struct pic32_rng *rng = platform_get_drvdata(pdev); writel(0, rng->base + RNGCON); - clk_disable_unprepare(rng->clk); return 0; } From 97c63a9dea2929fc4fd42f1d4446d97b6c011a1f Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Tue, 4 Jul 2023 19:32:02 +0200 Subject: [PATCH 035/149] hwrng: pic32 - remove unused defines Remove some unused defines and fix the indentation. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/pic32-rng.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index e04a054e8930..c1b3f5915f03 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -19,19 +19,12 @@ #include #define RNGCON 0x04 -#define TRNGEN BIT(8) -#define PRNGEN BIT(9) -#define PRNGCONT BIT(10) -#define TRNGMOD BIT(11) -#define SEEDLOAD BIT(12) -#define RNGPOLY1 0x08 -#define RNGPOLY2 0x0C -#define RNGNUMGEN1 0x10 -#define RNGNUMGEN2 0x14 +#define TRNGEN BIT(8) +#define TRNGMOD BIT(11) #define RNGSEED1 0x18 #define RNGSEED2 0x1C #define RNGRCNT 0x20 -#define RCNT_MASK 0x7F +#define RCNT_MASK 0x7F struct pic32_rng { void __iomem *base; From ac0042fa5aa5ac7929d95f4bf9185429ccdb70cb Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Tue, 4 Jul 2023 19:32:03 +0200 Subject: [PATCH 036/149] hwrng: pic32 - enable TRNG only while it's used The probe function enables the TRNG hardware before registering the driver. If registration fails, probe returns an error, but the TRNG remains enabled. Define init and cleanup functions, enable and disable the hardware there. 
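Sketch of the callback arrangement with placeholder names (the hwrng core calls .init when it starts using a device and .cleanup when it drops it, so the block only runs while it is actually in use):

    static int foo_rng_init(struct hwrng *rng)
    {
            /* enable the TRNG block */
            return 0;
    }

    static void foo_rng_cleanup(struct hwrng *rng)
    {
            /* disable the TRNG block */
    }

    static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            return 0; /* a real driver returns the number of bytes read */
    }

    static struct hwrng foo_rng = {
            .name    = "foo",
            .init    = foo_rng_init,
            .cleanup = foo_rng_cleanup,
            .read    = foo_rng_read,
    };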
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/pic32-rng.c | 41 ++++++++++++++---------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index c1b3f5915f03..1902f4389a3f 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -38,6 +38,15 @@ struct pic32_rng { */ #define RNG_TIMEOUT 500 +static int pic32_rng_init(struct hwrng *rng) +{ + struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng); + + /* enable TRNG in enhanced mode */ + writel(TRNGEN | TRNGMOD, priv->base + RNGCON); + return 0; +} + static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { @@ -59,12 +68,17 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max, return -EIO; } +static void pic32_rng_cleanup(struct hwrng *rng) +{ + struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng); + + writel(0, priv->base + RNGCON); +} + static int pic32_rng_probe(struct platform_device *pdev) { struct pic32_rng *priv; struct clk *clk; - u32 v; - int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -78,28 +92,12 @@ static int pic32_rng_probe(struct platform_device *pdev) if (IS_ERR(clk)) return PTR_ERR(clk); - /* enable TRNG in enhanced mode */ - v = TRNGEN | TRNGMOD; - writel(v, priv->base + RNGCON); - priv->rng.name = pdev->name; + priv->rng.init = pic32_rng_init; priv->rng.read = pic32_rng_read; + priv->rng.cleanup = pic32_rng_cleanup; - ret = devm_hwrng_register(&pdev->dev, &priv->rng); - if (ret) - return ret; - - platform_set_drvdata(pdev, priv); - - return 0; -} - -static int pic32_rng_remove(struct platform_device *pdev) -{ - struct pic32_rng *rng = platform_get_drvdata(pdev); - - writel(0, rng->base + RNGCON); - return 0; + return devm_hwrng_register(&pdev->dev, &priv->rng); } static const struct of_device_id pic32_rng_of_match[] __maybe_unused = { @@ -110,7 +108,6 @@ MODULE_DEVICE_TABLE(of, pic32_rng_of_match); static struct platform_driver pic32_rng_driver = { .probe = pic32_rng_probe, - .remove = pic32_rng_remove, .driver = { .name = "pic32-rng", .of_match_table = of_match_ptr(pic32_rng_of_match), From aa4b2f9ea53e9fc93f9d30746c32af5ac25f2a46 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Jul 2023 15:21:54 +0800 Subject: [PATCH 037/149] crypto: omap-des - Use devm_platform_get_and_ioremap_resource() Convert platform_get_resource(), devm_ioremap_resource() to a single call to devm_platform_get_and_ioremap_resource(), as this is exactly what this function does. Signed-off-by: Yangtao Li Signed-off-by: Herbert Xu --- drivers/crypto/omap-des.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index f783769ea110..371a51094e34 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -971,18 +971,12 @@ static int omap_des_probe(struct platform_device *pdev) dd->dev = dev; platform_set_drvdata(pdev, dd); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "no MEM resource info\n"); - goto err_res; - } - err = (dev->of_node) ? 
omap_des_get_of(dd, pdev) : omap_des_get_pdev(dd, pdev); if (err) goto err_res; - dd->io_base = devm_ioremap_resource(dev, res); + dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(dd->io_base)) { err = PTR_ERR(dd->io_base); goto err_res; From 1c5ff2fc35ac2c6da3509f7afa97b91f328dcfaa Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Jul 2023 15:21:55 +0800 Subject: [PATCH 038/149] crypto: keembay - Convert to devm_platform_ioremap_resource() Use devm_platform_ioremap_resource() to simplify code. Signed-off-by: Yangtao Li Signed-off-by: Herbert Xu --- drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c index d4bcbed1f546..51a6de6294cb 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c @@ -1170,7 +1170,6 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ocs_hcu_dev *hcu_dev; - struct resource *hcu_mem; int rc; hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL); @@ -1184,14 +1183,7 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev) if (rc) return rc; - /* Get the memory address and remap. */ - hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!hcu_mem) { - dev_err(dev, "Could not retrieve io mem resource.\n"); - return -ENODEV; - } - - hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem); + hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hcu_dev->io_base)) return PTR_ERR(hcu_dev->io_base); From 3aaafe054b7140418c9355512bc1a7a4980943c3 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Jul 2023 15:21:56 +0800 Subject: [PATCH 039/149] crypto: atmel-aes - Use devm_platform_get_and_ioremap_resource() Convert platform_get_resource(), devm_ioremap_resource() to a single call to devm_platform_get_and_ioremap_resource(), as this is exactly what this function does. 
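The generic shape of the conversion (sketch): a single call performs both the resource lookup and the ioremap, and it still returns the struct resource for drivers that also need the raw resource, for example the physical start address.

    struct resource *res;
    void __iomem *base;

    base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
    if (IS_ERR(base))
            return PTR_ERR(base);

    /* res->start and friends remain available when needed */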
Signed-off-by: Yangtao Li Signed-off-by: Herbert Xu --- drivers/crypto/atmel-aes.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 143d33fbb316..9692254faad9 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2566,11 +2566,9 @@ static int atmel_aes_probe(struct platform_device *pdev) crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); - /* Get the base address */ - aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!aes_res) { - dev_err(dev, "no MEM resource info\n"); - err = -ENODEV; + aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res); + if (IS_ERR(aes_dd->io_base)) { + err = PTR_ERR(aes_dd->io_base); goto err_tasklet_kill; } aes_dd->phys_base = aes_res->start; @@ -2597,13 +2595,6 @@ static int atmel_aes_probe(struct platform_device *pdev) goto err_tasklet_kill; } - aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res); - if (IS_ERR(aes_dd->io_base)) { - dev_err(dev, "can't ioremap\n"); - err = PTR_ERR(aes_dd->io_base); - goto err_tasklet_kill; - } - err = clk_prepare(aes_dd->iclk); if (err) goto err_tasklet_kill; From f069fa9d789d908e21350fcfb6a34271a3f2d9b6 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Jul 2023 15:21:57 +0800 Subject: [PATCH 040/149] crypto: atmel-sha - Use devm_platform_get_and_ioremap_resource() Convert platform_get_resource(), devm_ioremap_resource() to a single call to devm_platform_get_and_ioremap_resource(), as this is exactly what this function does. Signed-off-by: Yangtao Li Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 6bef634d3c86..f2031f934be9 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2604,11 +2604,9 @@ static int atmel_sha_probe(struct platform_device *pdev) crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); - /* Get the base address */ - sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!sha_res) { - dev_err(dev, "no MEM resource info\n"); - err = -ENODEV; + sha_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &sha_res); + if (IS_ERR(sha_dd->io_base)) { + err = PTR_ERR(sha_dd->io_base); goto err_tasklet_kill; } sha_dd->phys_base = sha_res->start; @@ -2635,13 +2633,6 @@ static int atmel_sha_probe(struct platform_device *pdev) goto err_tasklet_kill; } - sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res); - if (IS_ERR(sha_dd->io_base)) { - dev_err(dev, "can't ioremap\n"); - err = PTR_ERR(sha_dd->io_base); - goto err_tasklet_kill; - } - err = clk_prepare(sha_dd->iclk); if (err) goto err_tasklet_kill; From 32f91bb3df785a6334aa5acd46ac85dabff7368b Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Jul 2023 15:21:58 +0800 Subject: [PATCH 041/149] crypto: atmel-tdes - Use devm_platform_get_and_ioremap_resource() Convert platform_get_resource(), devm_ioremap_resource() to a single call to devm_platform_get_and_ioremap_resource(), as this is exactly what this function does. 
Signed-off-by: Yangtao Li Signed-off-by: Herbert Xu --- drivers/crypto/atmel-tdes.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index c9ded8be9c39..ba8981f326cf 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1172,11 +1172,9 @@ static int atmel_tdes_probe(struct platform_device *pdev) crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH); - /* Get the base address */ - tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!tdes_res) { - dev_err(dev, "no MEM resource info\n"); - err = -ENODEV; + tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res); + if (IS_ERR(tdes_dd->io_base)) { + err = PTR_ERR(tdes_dd->io_base); goto err_tasklet_kill; } tdes_dd->phys_base = tdes_res->start; @@ -1203,12 +1201,6 @@ static int atmel_tdes_probe(struct platform_device *pdev) goto err_tasklet_kill; } - tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res); - if (IS_ERR(tdes_dd->io_base)) { - err = PTR_ERR(tdes_dd->io_base); - goto err_tasklet_kill; - } - err = atmel_tdes_hw_version_init(tdes_dd); if (err) goto err_tasklet_kill; From 66c7b6473e2d6838973216a853ed1cc8dd193dc7 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Jul 2023 19:52:42 +0800 Subject: [PATCH 042/149] hwrng: timeriomem - Use devm_platform_get_and_ioremap_resource() Convert platform_get_resource(), devm_ioremap_resource() to a single call to devm_platform_get_and_ioremap_resource(), as this is exactly what this function does. Signed-off-by: Yangtao Li Reviewed-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/timeriomem-rng.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index 26f322d19a88..3db9d868efb1 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c @@ -113,16 +113,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev) return -EINVAL; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENXIO; - - if (res->start % 4 != 0 || resource_size(res) < 4) { - dev_err(&pdev->dev, - "address must be at least four bytes wide and 32-bit aligned\n"); - return -EINVAL; - } - /* Allocate memory for the device structure (and zero it) */ priv = devm_kzalloc(&pdev->dev, sizeof(struct timeriomem_rng_private), GFP_KERNEL); @@ -131,6 +121,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); + priv->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->io_base)) + return PTR_ERR(priv->io_base); + + if (res->start % 4 != 0 || resource_size(res) < 4) { + dev_err(&pdev->dev, + "address must be at least four bytes wide and 32-bit aligned\n"); + return -EINVAL; + } + if (pdev->dev.of_node) { int i; @@ -158,11 +158,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev) priv->rng_ops.name = dev_name(&pdev->dev); priv->rng_ops.read = timeriomem_rng_read; - priv->io_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->io_base)) { - return PTR_ERR(priv->io_base); - } - /* Assume random data is already available. 
*/ priv->present = 1; complete(&priv->completion); From b3882fa2a10e28b7642d0c11fcc26caab90e0fa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 7 Jul 2023 09:07:53 +0200 Subject: [PATCH 043/149] crypto: starfive - Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is ignored (apart from emitting a warning) and this typically results in resource leaks. To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. Eventually after all drivers are converted, .remove_new() is renamed to .remove(). Trivially convert this driver from always returning zero in the remove callback to the void returning variant. Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/starfive/jh7110-cryp.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c index cc43556b6c80..e573c097a4a0 100644 --- a/drivers/crypto/starfive/jh7110-cryp.c +++ b/drivers/crypto/starfive/jh7110-cryp.c @@ -212,7 +212,7 @@ err_probe_defer: return ret; } -static int starfive_cryp_remove(struct platform_device *pdev) +static void starfive_cryp_remove(struct platform_device *pdev) { struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev); @@ -233,8 +233,6 @@ static int starfive_cryp_remove(struct platform_device *pdev) clk_disable_unprepare(cryp->hclk); clk_disable_unprepare(cryp->ahb); reset_control_assert(cryp->rst); - - return 0; } static const struct of_device_id starfive_dt_ids[] __maybe_unused = { @@ -245,7 +243,7 @@ MODULE_DEVICE_TABLE(of, starfive_dt_ids); static struct platform_driver starfive_cryp_driver = { .probe = starfive_cryp_probe, - .remove = starfive_cryp_remove, + .remove_new = starfive_cryp_remove, .driver = { .name = DRIVER_NAME, .of_match_table = starfive_dt_ids, From b0ab0797f7ab74bd6e78ddce7d2ea580e588c60d Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 7 Jul 2023 21:18:19 +0800 Subject: [PATCH 044/149] crypto: hisilicon/hpre - ensure private key less than n The private key of the curve key size generated by stdrng, which maybe not less than n. Therefore, the private key with the curve key size minus 1 is generated to ensure that the private key is less than n. 
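For reference, the bound behind the "minus 1" (inferred from the standard curve parameters, not spelled out here): for the supported curves the order n is only slightly below 2^(8 * curve_sz), so any value that fits in curve_sz - 1 bytes satisfies

    key < 2^(8 * (curve_sz - 1)) <= n

and therefore never needs an explicit reduction before being used as a private key.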
Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 8ede77310dc5..9a1c61be32cc 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -1392,9 +1392,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + unsigned int sz, sz_shift, curve_sz; struct device *dev = ctx->dev; char key[HPRE_ECC_MAX_KSZ]; - unsigned int sz, sz_shift; struct ecdh params; int ret; @@ -1406,7 +1406,13 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, /* Use stdrng to generate private key */ if (!params.key || !params.key_size) { params.key = key; - params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id); + curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); + if (!curve_sz) { + dev_err(dev, "Invalid curve size!\n"); + return -EINVAL; + } + + params.key_size = curve_sz - 1; ret = ecdh_gen_privkey(ctx, ¶ms); if (ret) return ret; From d4211390d88902462a7642eb22fd83083588eaf7 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 10 Jul 2023 22:27:27 +0200 Subject: [PATCH 045/149] hwrng: ingenic - enable compile testing Enable compile testing for the ingenic-trng driver. Remove the dependency on HW_RANDOM. The ingenic-trng config section is under "if HW_RANDOM". Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 1aeba12391a1..5d72b8da3c36 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -271,8 +271,7 @@ config HW_RANDOM_INGENIC_RNG config HW_RANDOM_INGENIC_TRNG tristate "Ingenic True Random Number Generator support" - depends on HW_RANDOM - depends on MACH_X1830 + depends on MACH_X1830 || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the True Random Number Generator From 4cb9a7271f46eb1cc58099a3503d4b3f3969e37c Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 10 Jul 2023 22:27:28 +0200 Subject: [PATCH 046/149] hwrng: ingenic - remove two unused defines Remove two defines which are not used in the ingenic-trng driver's code. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ingenic-trng.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c index 0eb80f786f4d..64286cbdf158 100644 --- a/drivers/char/hw_random/ingenic-trng.c +++ b/drivers/char/hw_random/ingenic-trng.c @@ -22,8 +22,6 @@ #define TRNG_REG_STATUS_OFFSET 0x08 /* bits within the CFG register */ -#define CFG_RDY_CLR BIT(12) -#define CFG_INT_MASK BIT(11) #define CFG_GEN_EN BIT(0) /* bits within the STATUS register */ From 099f236879068547a05d02b92fb2861694027440 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 10 Jul 2023 22:27:29 +0200 Subject: [PATCH 047/149] hwrng: ingenic - remove dead assignments Don't assign a value to ret if we're about to return from the probe function and ret's value is not used. 
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ingenic-trng.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c index 64286cbdf158..bfec28ceab00 100644 --- a/drivers/char/hw_random/ingenic-trng.c +++ b/drivers/char/hw_random/ingenic-trng.c @@ -86,13 +86,11 @@ static int ingenic_trng_probe(struct platform_device *pdev) trng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->base)) { pr_err("%s: Failed to map DTRNG registers\n", __func__); - ret = PTR_ERR(trng->base); return PTR_ERR(trng->base); } trng->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(trng->clk)) { - ret = PTR_ERR(trng->clk); pr_crit("%s: Cannot get DTRNG clock\n", __func__); return PTR_ERR(trng->clk); } From e4ab6e72e84d4cfd6f8e5f477204f3053a79108f Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 10 Jul 2023 22:27:30 +0200 Subject: [PATCH 048/149] hwrng: ingenic - use devm_clk_get_enabled Use devm_clk_get_enabled in the ingenic-trng driver. We don't have to disable and unprepare the clock any more in error paths or in the remove function. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ingenic-trng.c | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c index bfec28ceab00..906fa78de47e 100644 --- a/drivers/char/hw_random/ingenic-trng.c +++ b/drivers/char/hw_random/ingenic-trng.c @@ -29,7 +29,6 @@ struct ingenic_trng { void __iomem *base; - struct clk *clk; struct hwrng rng; }; @@ -77,6 +76,7 @@ static int ingenic_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait static int ingenic_trng_probe(struct platform_device *pdev) { struct ingenic_trng *trng; + struct clk *clk; int ret; trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); @@ -89,16 +89,10 @@ static int ingenic_trng_probe(struct platform_device *pdev) return PTR_ERR(trng->base); } - trng->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(trng->clk)) { - pr_crit("%s: Cannot get DTRNG clock\n", __func__); - return PTR_ERR(trng->clk); - } - - ret = clk_prepare_enable(trng->clk); - if (ret) { - pr_crit("%s: Unable to enable DTRNG clock\n", __func__); - return ret; + clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(clk)) { + pr_crit("%s: Cannot get and enable DTRNG clock\n", __func__); + return PTR_ERR(clk); } trng->rng.name = pdev->name; @@ -109,17 +103,13 @@ static int ingenic_trng_probe(struct platform_device *pdev) ret = hwrng_register(&trng->rng); if (ret) { dev_err(&pdev->dev, "Failed to register hwrng\n"); - goto err_unprepare_clk; + return ret; } platform_set_drvdata(pdev, trng); dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n"); return 0; - -err_unprepare_clk: - clk_disable_unprepare(trng->clk); - return ret; } static int ingenic_trng_remove(struct platform_device *pdev) @@ -133,8 +123,6 @@ static int ingenic_trng_remove(struct platform_device *pdev) ctrl &= ~CFG_GEN_EN; writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET); - clk_disable_unprepare(trng->clk); - return 0; } From 6257490b95252b0c22e4b518570d438cedc78a0d Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 10 Jul 2023 22:27:31 +0200 Subject: [PATCH 049/149] hwrng: ingenic - use dev_err_probe in error paths Use dev_err_probe in error paths to make the code a bit shorter. 
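dev_err_probe() logs the message together with the error code, stays silent for -EPROBE_DEFER (recording the deferral reason instead), and returns the error it was given, so each error path collapses into a single statement, e.g.:

  clk = devm_clk_get_enabled(&pdev->dev, NULL);
  if (IS_ERR(clk))
          return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                               "%s: Cannot get and enable DTRNG clock\n",
                               __func__);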
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ingenic-trng.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c index 906fa78de47e..cc88a941c929 100644 --- a/drivers/char/hw_random/ingenic-trng.c +++ b/drivers/char/hw_random/ingenic-trng.c @@ -84,16 +84,14 @@ static int ingenic_trng_probe(struct platform_device *pdev) return -ENOMEM; trng->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(trng->base)) { - pr_err("%s: Failed to map DTRNG registers\n", __func__); - return PTR_ERR(trng->base); - } + if (IS_ERR(trng->base)) + return dev_err_probe(&pdev->dev, PTR_ERR(trng->base), + "%s: Failed to map DTRNG registers\n", __func__); clk = devm_clk_get_enabled(&pdev->dev, NULL); - if (IS_ERR(clk)) { - pr_crit("%s: Cannot get and enable DTRNG clock\n", __func__); - return PTR_ERR(clk); - } + if (IS_ERR(clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(clk), + "%s: Cannot get and enable DTRNG clock\n", __func__); trng->rng.name = pdev->name; trng->rng.init = ingenic_trng_init; @@ -101,10 +99,8 @@ static int ingenic_trng_probe(struct platform_device *pdev) trng->rng.read = ingenic_trng_read; ret = hwrng_register(&trng->rng); - if (ret) { - dev_err(&pdev->dev, "Failed to register hwrng\n"); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n"); platform_set_drvdata(pdev, trng); From 71839a641066881e032b6353516b16bdafbf606a Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 10 Jul 2023 22:27:32 +0200 Subject: [PATCH 050/149] hwrng: ingenic - don't disable the rng in ingenic_trng_remove There's no need to disable the rng in ingenic_trng_remove. The driver's init function sets the CFG_GEN_EN bit to enable the rng. The cleanup function clears CFG_GEN_EN to revert this. The remove function calls hwrng_unregister. If the ingenic-trng is not the current rng at this point, CFG_GEN_EN has already been cleared. If the ingenic-trng is the current rng, drop_current_rng will call the cleanup function. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ingenic-trng.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c index cc88a941c929..9c54721f8399 100644 --- a/drivers/char/hw_random/ingenic-trng.c +++ b/drivers/char/hw_random/ingenic-trng.c @@ -111,14 +111,9 @@ static int ingenic_trng_probe(struct platform_device *pdev) static int ingenic_trng_remove(struct platform_device *pdev) { struct ingenic_trng *trng = platform_get_drvdata(pdev); - unsigned int ctrl; hwrng_unregister(&trng->rng); - ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET); - ctrl &= ~CFG_GEN_EN; - writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET); - return 0; } From a40be5e89ff636533fc849f3db8964388c0ee8f1 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 10 Jul 2023 22:27:33 +0200 Subject: [PATCH 051/149] hwrng: ingenic - switch to device managed registration Call devm_hwrng_register for device managed registration of the ingenic-trng driver. ingenic_trng_remove can then be deleted. 
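With the device-managed variant the hwrng core drops the registration automatically when the platform device is unbound, so the probe-side call is all that remains:

  ret = devm_hwrng_register(&pdev->dev, &trng->rng);
  if (ret)
          return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n");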
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ingenic-trng.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c index 9c54721f8399..3967a8dbe967 100644 --- a/drivers/char/hw_random/ingenic-trng.c +++ b/drivers/char/hw_random/ingenic-trng.c @@ -98,7 +98,7 @@ static int ingenic_trng_probe(struct platform_device *pdev) trng->rng.cleanup = ingenic_trng_cleanup; trng->rng.read = ingenic_trng_read; - ret = hwrng_register(&trng->rng); + ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n"); @@ -108,15 +108,6 @@ static int ingenic_trng_probe(struct platform_device *pdev) return 0; } -static int ingenic_trng_remove(struct platform_device *pdev) -{ - struct ingenic_trng *trng = platform_get_drvdata(pdev); - - hwrng_unregister(&trng->rng); - - return 0; -} - static const struct of_device_id ingenic_trng_of_match[] = { { .compatible = "ingenic,x1830-dtrng" }, { /* sentinel */ } @@ -125,7 +116,6 @@ MODULE_DEVICE_TABLE(of, ingenic_trng_of_match); static struct platform_driver ingenic_trng_driver = { .probe = ingenic_trng_probe, - .remove = ingenic_trng_remove, .driver = { .name = "ingenic-trng", .of_match_table = ingenic_trng_of_match, From e8c1fdcc62d373da38f673e474564fbed9e249dd Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Thu, 13 Jul 2023 09:04:44 +0200 Subject: [PATCH 052/149] hwrng: ba431 - do not set drvdata Do not set drvdata in the ba431 driver. Nobody is using it. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ba431-rng.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c index 5b7ca0416490..b1518dd52a24 100644 --- a/drivers/char/hw_random/ba431-rng.c +++ b/drivers/char/hw_random/ba431-rng.c @@ -189,8 +189,6 @@ static int ba431_trng_probe(struct platform_device *pdev) ba431->rng.cleanup = ba431_trng_cleanup; ba431->rng.read = ba431_trng_read; - platform_set_drvdata(pdev, ba431); - ret = devm_hwrng_register(&pdev->dev, &ba431->rng); if (ret) { dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret); From 97b7aa77d1705ea25d484a77de44bf55d0a772a0 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Thu, 13 Jul 2023 09:04:45 +0200 Subject: [PATCH 053/149] hwrng: ba431 - don't init of_device_id's data We have no device-specific data for silex-insight,ba431-rng. There's no need to set .data = NULL, this is the default. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ba431-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c index b1518dd52a24..d2a9d16323a6 100644 --- a/drivers/char/hw_random/ba431-rng.c +++ b/drivers/char/hw_random/ba431-rng.c @@ -201,7 +201,7 @@ static int ba431_trng_probe(struct platform_device *pdev) } static const struct of_device_id ba431_trng_dt_ids[] = { - { .compatible = "silex-insight,ba431-rng", .data = NULL }, + { .compatible = "silex-insight,ba431-rng" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids); From 1422e363516c8ac5183a72124bd860d796ebd9e7 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Thu, 13 Jul 2023 09:04:46 +0200 Subject: [PATCH 054/149] hwrng: ba431 - use dev_err_probe after failed registration Use dev_err_probe to print the error message after a failed hwrng registration. 
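Besides being shorter, dev_err_probe() already includes the error code in the line it logs, which makes the explicit "(%d)" formatting of the old dev_err() call redundant:

  ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
  if (ret)
          return dev_err_probe(&pdev->dev, ret, "BA431 registration failed\n");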
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/ba431-rng.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c index d2a9d16323a6..9de7466e6896 100644 --- a/drivers/char/hw_random/ba431-rng.c +++ b/drivers/char/hw_random/ba431-rng.c @@ -190,10 +190,8 @@ static int ba431_trng_probe(struct platform_device *pdev) ba431->rng.read = ba431_trng_read; ret = devm_hwrng_register(&pdev->dev, &ba431->rng); - if (ret) { - dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "BA431 registration failed\n"); dev_info(&pdev->dev, "BA431 TRNG registered\n"); From 0d51794386325e90f2215fe9b2f4cb7390029260 Mon Sep 17 00:00:00 2001 From: Lionel Debieve Date: Thu, 13 Jul 2023 17:15:12 +0200 Subject: [PATCH 055/149] dt-bindings: crypto: add new compatible for stm32-hash Add a new compatible for stm32mp13 support. Signed-off-by: Lionel Debieve Signed-off-by: Thomas Bourgoin Acked-by: Conor Dooley Signed-off-by: Herbert Xu --- Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml index b767ec72a999..ac480765cde0 100644 --- a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml +++ b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml @@ -20,6 +20,7 @@ properties: - stericsson,ux500-hash - st,stm32f456-hash - st,stm32f756-hash + - st,stm32mp13-hash reg: maxItems: 1 From b6248fb8b8324cfde5eefe2ead1ba3650452d410 Mon Sep 17 00:00:00 2001 From: Thomas Bourgoin Date: Thu, 13 Jul 2023 17:15:13 +0200 Subject: [PATCH 056/149] crypto: stm32 - add new algorithms support Add the all SHA-2 (up to 512) and SHA-3 algorithm support. Update compatible table to add stm32mp13. 
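For reference, nothing changes for users of the crypto API: the new digests are requested by name like any other ahash. A minimal, hypothetical caller (sha3_256_digest_example() below is illustrative only and is not part of this patch) could look like:

  #include <crypto/hash.h>
  #include <linux/err.h>
  #include <linux/scatterlist.h>

  /* 'data' must live in linearly-mapped memory (not on the stack, not
   * vmalloc'ed) so it can be wrapped in a scatterlist.
   */
  static int sha3_256_digest_example(const u8 *data, unsigned int len,
                                     u8 *out /* SHA3_256_DIGEST_SIZE bytes */)
  {
          struct crypto_ahash *tfm;
          struct ahash_request *req;
          struct scatterlist sg;
          DECLARE_CRYPTO_WAIT(wait);
          int ret;

          tfm = crypto_alloc_ahash("sha3-256", 0, 0);
          if (IS_ERR(tfm))
                  return PTR_ERR(tfm);

          req = ahash_request_alloc(tfm, GFP_KERNEL);
          if (!req) {
                  ret = -ENOMEM;
                  goto out_free_tfm;
          }

          sg_init_one(&sg, data, len);
          ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                     crypto_req_done, &wait);
          ahash_request_set_crypt(req, &sg, out, len);

          /* crypto_wait_req() handles the asynchronous completion used by
           * hardware implementations such as this one.
           */
          ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

          ahash_request_free(req);
  out_free_tfm:
          crypto_free_ahash(tfm);
          return ret;
  }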
Signed-off-by: Thomas Bourgoin Signed-off-by: Herbert Xu --- drivers/crypto/stm32/Kconfig | 2 + drivers/crypto/stm32/stm32-hash.c | 676 ++++++++++++++++++++++++------ 2 files changed, 548 insertions(+), 130 deletions(-) diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig index 4fc581e9e595..49dfd161e9b9 100644 --- a/drivers/crypto/stm32/Kconfig +++ b/drivers/crypto/stm32/Kconfig @@ -16,6 +16,8 @@ config CRYPTO_DEV_STM32_HASH select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_SHA3 select CRYPTO_ENGINE help This enables support for the HASH hw accelerator which can be found diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index f0df32382719..30e42a402aa7 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #define HASH_CR 0x00 @@ -45,19 +46,11 @@ #define HASH_CR_DMAE BIT(3) #define HASH_CR_DATATYPE_POS 4 #define HASH_CR_MODE BIT(6) +#define HASH_CR_ALGO_POS 7 #define HASH_CR_MDMAT BIT(13) #define HASH_CR_DMAA BIT(14) #define HASH_CR_LKEY BIT(16) -#define HASH_CR_ALGO_SHA1 0x0 -#define HASH_CR_ALGO_MD5 0x80 -#define HASH_CR_ALGO_SHA224 0x40000 -#define HASH_CR_ALGO_SHA256 0x40080 - -#define HASH_CR_UX500_EMPTYMSG BIT(20) -#define HASH_CR_UX500_ALGO_SHA1 BIT(7) -#define HASH_CR_UX500_ALGO_SHA256 0x0 - /* Interrupt */ #define HASH_DINIE BIT(0) #define HASH_DCIE BIT(1) @@ -66,9 +59,6 @@ #define HASH_MASK_CALC_COMPLETION BIT(0) #define HASH_MASK_DATA_INPUT BIT(1) -/* Context swap register */ -#define HASH_CSR_REGISTER_NUMBER 54 - /* Status Flags */ #define HASH_SR_DATA_INPUT_READY BIT(0) #define HASH_SR_OUTPUT_READY BIT(1) @@ -79,6 +69,18 @@ #define HASH_STR_NBLW_MASK GENMASK(4, 0) #define HASH_STR_DCAL BIT(8) +/* HWCFGR Register */ +#define HASH_HWCFG_DMA_MASK GENMASK(3, 0) + +/* Context swap register */ +#define HASH_CSR_NB_SHA256_HMAC 54 +#define HASH_CSR_NB_SHA256 38 +#define HASH_CSR_NB_SHA512_HMAC 103 +#define HASH_CSR_NB_SHA512 91 +#define HASH_CSR_NB_SHA3_HMAC 88 +#define HASH_CSR_NB_SHA3 72 +#define HASH_CSR_NB_MAX HASH_CSR_NB_SHA512_HMAC + #define HASH_FLAGS_INIT BIT(0) #define HASH_FLAGS_OUTPUT_READY BIT(1) #define HASH_FLAGS_CPU BIT(2) @@ -87,20 +89,20 @@ #define HASH_FLAGS_HMAC_INIT BIT(5) #define HASH_FLAGS_HMAC_FINAL BIT(6) #define HASH_FLAGS_HMAC_KEY BIT(7) - +#define HASH_FLAGS_SHA3_MODE BIT(8) #define HASH_FLAGS_FINAL BIT(15) #define HASH_FLAGS_FINUP BIT(16) -#define HASH_FLAGS_ALGO_MASK GENMASK(21, 18) -#define HASH_FLAGS_MD5 BIT(18) -#define HASH_FLAGS_SHA1 BIT(19) -#define HASH_FLAGS_SHA224 BIT(20) -#define HASH_FLAGS_SHA256 BIT(21) +#define HASH_FLAGS_ALGO_MASK GENMASK(20, 17) +#define HASH_FLAGS_ALGO_SHIFT 17 +#define HASH_FLAGS_ERRORS BIT(21) #define HASH_FLAGS_EMPTY BIT(22) #define HASH_FLAGS_HMAC BIT(23) #define HASH_OP_UPDATE 1 #define HASH_OP_FINAL 2 +#define HASH_BURST_LEVEL 4 + enum stm32_hash_data_format { HASH_DATA_32_BITS = 0x0, HASH_DATA_16_BITS = 0x1, @@ -108,11 +110,26 @@ enum stm32_hash_data_format { HASH_DATA_1_BIT = 0x3 }; -#define HASH_BUFLEN 256 -#define HASH_LONG_KEY 64 -#define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8) -#define HASH_QUEUE_LENGTH 16 -#define HASH_DMA_THRESHOLD 50 +#define HASH_BUFLEN (SHA3_224_BLOCK_SIZE + 4) +#define HASH_MAX_KEY_SIZE (SHA512_BLOCK_SIZE * 8) + +enum stm32_hash_algo { + HASH_SHA1 = 0, + HASH_MD5 = 1, + HASH_SHA224 = 2, + HASH_SHA256 = 3, + HASH_SHA3_224 = 4, + HASH_SHA3_256 = 5, + HASH_SHA3_384 = 6, + HASH_SHA3_512 = 7, + 
HASH_SHA384 = 12, + HASH_SHA512 = 15, +}; + +enum ux500_hash_algo { + HASH_SHA256_UX500 = 0, + HASH_SHA1_UX500 = 1, +}; #define HASH_AUTOSUSPEND_DELAY 50 @@ -130,19 +147,19 @@ struct stm32_hash_state { u32 flags; u16 bufcnt; - u16 buflen; + u16 blocklen; u8 buffer[HASH_BUFLEN] __aligned(4); /* hash state */ - u32 hw_context[3 + HASH_CSR_REGISTER_NUMBER]; + u32 hw_context[3 + HASH_CSR_NB_MAX]; }; struct stm32_hash_request_ctx { struct stm32_hash_dev *hdev; unsigned long op; - u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); + u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32)); size_t digcnt; /* DMA */ @@ -166,12 +183,13 @@ struct stm32_hash_algs_info { }; struct stm32_hash_pdata { - struct stm32_hash_algs_info *algs_info; - size_t algs_info_size; - bool has_sr; - bool has_mdmat; - bool broken_emptymsg; - bool ux500; + const int alg_shift; + const struct stm32_hash_algs_info *algs_info; + size_t algs_info_size; + bool has_sr; + bool has_mdmat; + bool broken_emptymsg; + bool ux500; }; struct stm32_hash_dev { @@ -182,7 +200,6 @@ struct stm32_hash_dev { void __iomem *io_base; phys_addr_t phys_base; u32 dma_mode; - u32 dma_maxburst; bool polled; struct ahash_request *req; @@ -275,31 +292,19 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt) struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); struct stm32_hash_state *state = &rctx->state; + u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT; u32 reg = HASH_CR_INIT; if (!(hdev->flags & HASH_FLAGS_INIT)) { - switch (state->flags & HASH_FLAGS_ALGO_MASK) { - case HASH_FLAGS_MD5: - reg |= HASH_CR_ALGO_MD5; - break; - case HASH_FLAGS_SHA1: - if (hdev->pdata->ux500) - reg |= HASH_CR_UX500_ALGO_SHA1; + if (hdev->pdata->ux500) { + reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS); + } else { + if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS) + reg |= ((alg & BIT(1)) << 17) | + ((alg & BIT(0)) << HASH_CR_ALGO_POS); else - reg |= HASH_CR_ALGO_SHA1; - break; - case HASH_FLAGS_SHA224: - reg |= HASH_CR_ALGO_SHA224; - break; - case HASH_FLAGS_SHA256: - if (hdev->pdata->ux500) - reg |= HASH_CR_UX500_ALGO_SHA256; - else - reg |= HASH_CR_ALGO_SHA256; - break; - default: - reg |= HASH_CR_ALGO_MD5; + reg |= alg << hdev->pdata->alg_shift; } reg |= (rctx->data_type << HASH_CR_DATATYPE_POS); @@ -307,7 +312,7 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt) if (state->flags & HASH_FLAGS_HMAC) { hdev->flags |= HASH_FLAGS_HMAC; reg |= HASH_CR_MODE; - if (ctx->keylen > HASH_LONG_KEY) + if (ctx->keylen > crypto_ahash_blocksize(tfm)) reg |= HASH_CR_LKEY; } @@ -318,6 +323,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt) hdev->flags |= HASH_FLAGS_INIT; + /* + * After first block + 1 words are fill up, + * we only need to fill 1 block to start partial computation + */ + rctx->state.blocklen -= sizeof(u32); + dev_dbg(hdev->dev, "Write Control %x\n", reg); } } @@ -327,9 +338,9 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx) struct stm32_hash_state *state = &rctx->state; size_t count; - while ((state->bufcnt < state->buflen) && rctx->total) { + while ((state->bufcnt < state->blocklen) && rctx->total) { count = min(rctx->sg->length - rctx->offset, rctx->total); - count = min_t(size_t, count, state->buflen - state->bufcnt); + count = min_t(size_t, count, state->blocklen - state->bufcnt); if (count <= 0) { if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) { @@ -419,20 +430,59 @@ static int 
stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, return 0; } +static int hash_swap_reg(struct stm32_hash_request_ctx *rctx) +{ + struct stm32_hash_state *state = &rctx->state; + + switch ((state->flags & HASH_FLAGS_ALGO_MASK) >> + HASH_FLAGS_ALGO_SHIFT) { + case HASH_MD5: + case HASH_SHA1: + case HASH_SHA224: + case HASH_SHA256: + if (state->flags & HASH_FLAGS_HMAC) + return HASH_CSR_NB_SHA256_HMAC; + else + return HASH_CSR_NB_SHA256; + break; + + case HASH_SHA384: + case HASH_SHA512: + if (state->flags & HASH_FLAGS_HMAC) + return HASH_CSR_NB_SHA512_HMAC; + else + return HASH_CSR_NB_SHA512; + break; + + case HASH_SHA3_224: + case HASH_SHA3_256: + case HASH_SHA3_384: + case HASH_SHA3_512: + if (state->flags & HASH_FLAGS_HMAC) + return HASH_CSR_NB_SHA3_HMAC; + else + return HASH_CSR_NB_SHA3; + break; + + default: + return -EINVAL; + } +} + static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct stm32_hash_state *state = &rctx->state; u32 *preg = state->hw_context; int bufcnt, err = 0, final; - int i; + int i, swap_reg; dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags); final = state->flags & HASH_FLAGS_FINAL; - while ((rctx->total >= state->buflen) || - (state->bufcnt + rctx->total >= state->buflen)) { + while ((rctx->total >= state->blocklen) || + (state->bufcnt + rctx->total >= state->blocklen)) { stm32_hash_append_sg(rctx); bufcnt = state->bufcnt; state->bufcnt = 0; @@ -455,11 +505,13 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev) if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; + swap_reg = hash_swap_reg(rctx); + if (!hdev->pdata->ux500) *preg++ = stm32_hash_read(hdev, HASH_IMR); *preg++ = stm32_hash_read(hdev, HASH_STR); *preg++ = stm32_hash_read(hdev, HASH_CR); - for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++) + for (i = 0; i < swap_reg; i++) *preg++ = stm32_hash_read(hdev, HASH_CSR(i)); state->flags |= HASH_FLAGS_INIT; @@ -544,7 +596,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev) struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); int err; - if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) { + if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) { err = stm32_hash_write_key(hdev); if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; @@ -579,8 +631,8 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) dma_conf.direction = DMA_MEM_TO_DEV; dma_conf.dst_addr = hdev->phys_base + HASH_DIN; dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - dma_conf.src_maxburst = hdev->dma_maxburst; - dma_conf.dst_maxburst = hdev->dma_maxburst; + dma_conf.src_maxburst = HASH_BURST_LEVEL; + dma_conf.dst_maxburst = HASH_BURST_LEVEL; dma_conf.device_fc = false; chan = dma_request_chan(hdev->dev, "in"); @@ -614,7 +666,6 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) rctx->total = hdev->req->nbytes; rctx->nents = sg_nents(rctx->sg); - if (rctx->nents < 0) return -EINVAL; @@ -718,11 +769,12 @@ static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) static bool stm32_hash_dma_aligned_data(struct ahash_request *req) { struct scatterlist *sg; + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); int i; - if (req->nbytes <= HASH_DMA_THRESHOLD) + if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen) return false; if (sg_nents(req->src) > 1) { @@ -748,31 +800,64 @@ 
static int stm32_hash_init(struct ahash_request *req) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); struct stm32_hash_state *state = &rctx->state; + bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE; rctx->hdev = hdev; state->flags = HASH_FLAGS_CPU; + if (sha3_mode) + state->flags |= HASH_FLAGS_SHA3_MODE; + rctx->digcnt = crypto_ahash_digestsize(tfm); switch (rctx->digcnt) { case MD5_DIGEST_SIZE: - state->flags |= HASH_FLAGS_MD5; + state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT; break; case SHA1_DIGEST_SIZE: - state->flags |= HASH_FLAGS_SHA1; + if (hdev->pdata->ux500) + state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT; break; case SHA224_DIGEST_SIZE: - state->flags |= HASH_FLAGS_SHA224; + if (sha3_mode) + state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT; break; case SHA256_DIGEST_SIZE: - state->flags |= HASH_FLAGS_SHA256; + if (sha3_mode) { + state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT; + } else { + if (hdev->pdata->ux500) + state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT; + } + break; + case SHA384_DIGEST_SIZE: + if (sha3_mode) + state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT; + break; + case SHA512_DIGEST_SIZE: + if (sha3_mode) + state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT; + else + state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT; break; default: return -EINVAL; } rctx->state.bufcnt = 0; - rctx->state.buflen = HASH_BUFLEN; + rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32); + if (rctx->state.blocklen > HASH_BUFLEN) { + dev_err(hdev->dev, "Error, block too large"); + return -EINVAL; + } rctx->total = 0; rctx->offset = 0; rctx->data_type = HASH_DATA_8_BITS; @@ -842,6 +927,7 @@ static void stm32_hash_emptymsg_fallback(struct ahash_request *req) static void stm32_hash_copy_hash(struct ahash_request *req) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_state *state = &rctx->state; struct stm32_hash_dev *hdev = rctx->hdev; @@ -851,22 +937,7 @@ static void stm32_hash_copy_hash(struct ahash_request *req) if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY)) return stm32_hash_emptymsg_fallback(req); - switch (state->flags & HASH_FLAGS_ALGO_MASK) { - case HASH_FLAGS_MD5: - hashsize = MD5_DIGEST_SIZE; - break; - case HASH_FLAGS_SHA1: - hashsize = SHA1_DIGEST_SIZE; - break; - case HASH_FLAGS_SHA224: - hashsize = SHA224_DIGEST_SIZE; - break; - case HASH_FLAGS_SHA256: - hashsize = SHA256_DIGEST_SIZE; - break; - default: - return; - } + hashsize = crypto_ahash_digestsize(tfm); for (i = 0; i < hashsize / sizeof(u32); i++) { if (hdev->pdata->ux500) @@ -881,6 +952,11 @@ static void stm32_hash_copy_hash(struct ahash_request *req) static int stm32_hash_finish(struct ahash_request *req) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + u32 reg; + + reg = stm32_hash_read(rctx->hdev, HASH_SR); + reg &= ~HASH_SR_OUTPUT_READY; + stm32_hash_write(rctx->hdev, HASH_SR, reg); if (!req->result) return -EINVAL; @@ -920,6 +996,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_dev *hdev = 
stm32_hash_find_dev(ctx); struct stm32_hash_state *state = &rctx->state; + int swap_reg; int err = 0; if (!hdev) @@ -932,6 +1009,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq) hdev->req = req; hdev->flags = 0; + swap_reg = hash_swap_reg(rctx); if (state->flags & HASH_FLAGS_INIT) { u32 *preg = rctx->state.hw_context; @@ -945,7 +1023,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq) reg = *preg++ | HASH_CR_INIT; stm32_hash_write(hdev, HASH_CR, reg); - for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++) + for (i = 0; i < swap_reg; i++) stm32_hash_write(hdev, HASH_CSR(i), *preg++); hdev->flags |= HASH_FLAGS_INIT; @@ -1000,7 +1078,7 @@ static int stm32_hash_update(struct ahash_request *req) rctx->sg = req->src; rctx->offset = 0; - if ((state->bufcnt + rctx->total < state->buflen)) { + if ((state->bufcnt + rctx->total < state->blocklen)) { stm32_hash_append_sg(rctx); return 0; } @@ -1102,8 +1180,7 @@ static int stm32_hash_init_fallback(struct crypto_tfm *tfm) return 0; } -static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, - const char *algs_hmac_name) +static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags) { struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1112,8 +1189,8 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, ctx->keylen = 0; - if (algs_hmac_name) - ctx->flags |= HASH_FLAGS_HMAC; + if (algs_flags) + ctx->flags |= algs_flags; ctx->enginectx.op.do_one_request = stm32_hash_one_request; @@ -1122,28 +1199,25 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, static int stm32_hash_cra_init(struct crypto_tfm *tfm) { - return stm32_hash_cra_init_algs(tfm, NULL); + return stm32_hash_cra_init_algs(tfm, 0); } -static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm) +static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm) { - return stm32_hash_cra_init_algs(tfm, "md5"); + return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC); } -static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm) +static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm) { - return stm32_hash_cra_init_algs(tfm, "sha1"); + return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE); } -static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm) +static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm) { - return stm32_hash_cra_init_algs(tfm, "sha224"); + return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE | + HASH_FLAGS_HMAC); } -static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm) -{ - return stm32_hash_cra_init_algs(tfm, "sha256"); -} static void stm32_hash_cra_exit(struct crypto_tfm *tfm) { @@ -1185,8 +1259,6 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id) reg = stm32_hash_read(hdev, HASH_SR); if (reg & HASH_SR_OUTPUT_READY) { - reg &= ~HASH_SR_OUTPUT_READY; - stm32_hash_write(hdev, HASH_SR, reg); hdev->flags |= HASH_FLAGS_OUTPUT_READY; /* Disable IT*/ stm32_hash_write(hdev, HASH_IMR, 0); @@ -1244,12 +1316,12 @@ static struct ahash_alg algs_md5[] = { .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, - .cra_init = stm32_hash_cra_md5_init, + .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } } - }, + } }; static struct ahash_alg algs_sha1[] = { @@ -1300,7 +1372,7 @@ static struct ahash_alg algs_sha1[] = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, - .cra_init = stm32_hash_cra_sha1_init, + 
.cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } @@ -1356,7 +1428,7 @@ static struct ahash_alg algs_sha224[] = { .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, - .cra_init = stm32_hash_cra_sha224_init, + .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } @@ -1412,7 +1484,7 @@ static struct ahash_alg algs_sha256[] = { .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, - .cra_init = stm32_hash_cra_sha256_init, + .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } @@ -1420,6 +1492,330 @@ static struct ahash_alg algs_sha256[] = { }, }; +static struct ahash_alg algs_sha384_sha512[] = { + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "sha384", + .cra_driver_name = "stm32-sha384", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .setkey = stm32_hash_setkey, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "stm32-hmac-sha384", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_hmac_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "sha512", + .cra_driver_name = "stm32-sha512", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "stm32-hmac-sha512", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct 
stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_hmac_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, +}; + +static struct ahash_alg algs_sha3[] = { + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA3_224_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "sha3-224", + .cra_driver_name = "stm32-sha3-224", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA3_224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha3_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA3_224_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "hmac(sha3-224)", + .cra_driver_name = "stm32-hmac-sha3-224", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA3_224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha3_hmac_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA3_256_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "sha3-256", + .cra_driver_name = "stm32-sha3-256", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA3_256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha3_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA3_256_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "hmac(sha3-256)", + .cra_driver_name = "stm32-hmac-sha3-256", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA3_256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha3_hmac_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA3_384_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "sha3-384", + .cra_driver_name = "stm32-sha3-384", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | 
+ CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA3_384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha3_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA3_384_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "hmac(sha3-384)", + .cra_driver_name = "stm32-hmac-sha3-384", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA3_384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha3_hmac_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .halg = { + .digestsize = SHA3_512_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "sha3-512", + .cra_driver_name = "stm32-sha3-512", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA3_512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha3_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + }, + { + .init = stm32_hash_init, + .update = stm32_hash_update, + .final = stm32_hash_final, + .finup = stm32_hash_finup, + .digest = stm32_hash_digest, + .export = stm32_hash_export, + .import = stm32_hash_import, + .setkey = stm32_hash_setkey, + .halg = { + .digestsize = SHA3_512_DIGEST_SIZE, + .statesize = sizeof(struct stm32_hash_state), + .base = { + .cra_name = "hmac(sha3-512)", + .cra_driver_name = "stm32-hmac-sha3-512", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA3_512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct stm32_hash_ctx), + .cra_alignmask = 3, + .cra_init = stm32_hash_cra_sha3_hmac_init, + .cra_exit = stm32_hash_cra_exit, + .cra_module = THIS_MODULE, + } + } + } +}; + static int stm32_hash_register_algs(struct stm32_hash_dev *hdev) { unsigned int i, j; @@ -1471,6 +1867,7 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = { }; static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = { + .alg_shift = 7, .algs_info = stm32_hash_algs_info_ux500, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500), .broken_emptymsg = true, @@ -1489,6 +1886,7 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = { }; static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = { + .alg_shift = 7, .algs_info = stm32_hash_algs_info_stm32f4, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4), .has_sr = true, @@ -1515,25 +1913,49 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = { }; static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = { + .alg_shift = 7, .algs_info = stm32_hash_algs_info_stm32f7, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7), .has_sr = true, .has_mdmat = true, }; +static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = { + { + 
.algs_list = algs_sha1, + .size = ARRAY_SIZE(algs_sha1), + }, + { + .algs_list = algs_sha224, + .size = ARRAY_SIZE(algs_sha224), + }, + { + .algs_list = algs_sha256, + .size = ARRAY_SIZE(algs_sha256), + }, + { + .algs_list = algs_sha384_sha512, + .size = ARRAY_SIZE(algs_sha384_sha512), + }, + { + .algs_list = algs_sha3, + .size = ARRAY_SIZE(algs_sha3), + }, +}; + +static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = { + .alg_shift = 17, + .algs_info = stm32_hash_algs_info_stm32mp13, + .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13), + .has_sr = true, + .has_mdmat = true, +}; + static const struct of_device_id stm32_hash_of_match[] = { - { - .compatible = "stericsson,ux500-hash", - .data = &stm32_hash_pdata_ux500, - }, - { - .compatible = "st,stm32f456-hash", - .data = &stm32_hash_pdata_stm32f4, - }, - { - .compatible = "st,stm32f756-hash", - .data = &stm32_hash_pdata_stm32f7, - }, + { .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 }, + { .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 }, + { .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 }, + { .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 }, {}, }; @@ -1548,12 +1970,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, return -EINVAL; } - if (of_property_read_u32(dev->of_node, "dma-maxburst", - &hdev->dma_maxburst)) { - dev_info(dev, "dma-maxburst not specified, using 0\n"); - hdev->dma_maxburst = 0; - } - return 0; } @@ -1663,7 +2079,7 @@ static int stm32_hash_probe(struct platform_device *pdev) /* FIXME: implement DMA mode for Ux500 */ hdev->dma_mode = 0; else - hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR); + hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK; /* Register algos */ ret = stm32_hash_register_algs(hdev); @@ -1772,6 +2188,6 @@ static struct platform_driver stm32_hash_driver = { module_platform_driver(stm32_hash_driver); -MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver"); +MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver"); MODULE_AUTHOR("Lionel Debieve "); MODULE_LICENSE("GPL v2"); From 0e99d38ff6ad4baf0c24ec0aca8a01c522ef4dcd Mon Sep 17 00:00:00 2001 From: Thomas Bourgoin Date: Thu, 13 Jul 2023 17:15:14 +0200 Subject: [PATCH 057/149] crypto: stm32 - remove bufcnt in stm32_hash_write_ctrl. Commit "crypto: stm32 - Fix empty message processing" remove the use of the argument bufcnt in stm32_hash_write_ctrl. Hence, we can remove it from the function prototype and simplify the function declaration. 
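After the cleanup the helper is declared and called without the dead parameter (sketch of the resulting prototype and call site):

  /* prototype after the change */
  static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev);

  /* call sites no longer pass a length */
  stm32_hash_write_ctrl(hdev);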
Signed-off-by: Thomas Bourgoin Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 30e42a402aa7..c179a6c1a457 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -286,7 +286,7 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev) return 0; } -static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt) +static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); @@ -395,7 +395,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, hdev->flags |= HASH_FLAGS_CPU; - stm32_hash_write_ctrl(hdev, length); + stm32_hash_write_ctrl(hdev); if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; @@ -669,7 +669,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) if (rctx->nents < 0) return -EINVAL; - stm32_hash_write_ctrl(hdev, rctx->total); + stm32_hash_write_ctrl(hdev); if (hdev->flags & HASH_FLAGS_HMAC) { err = stm32_hash_hmac_dma_send(hdev); From d9c83f71eeceed2cb54bb78be84f2d4055fd9a1f Mon Sep 17 00:00:00 2001 From: Thomas Bourgoin Date: Thu, 13 Jul 2023 17:15:15 +0200 Subject: [PATCH 058/149] crypto: stm32 - fix loop iterating through scatterlist for DMA We were reading the length of the scatterlist sg after copying value of tsg inside. So we are using the size of the previous scatterlist and for the first one we are using an unitialised value. Fix this by copying tsg in sg[0] before reading the size. Fixes : 8a1012d3f2ab ("crypto: stm32 - Support for STM32 HASH module") Cc: stable@vger.kernel.org Signed-off-by: Thomas Bourgoin Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index c179a6c1a457..519fb716acee 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -678,9 +678,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) } for_each_sg(rctx->sg, tsg, rctx->nents, i) { + sg[0] = *tsg; len = sg->length; - sg[0] = *tsg; if (sg_is_last(sg)) { if (hdev->dma_mode == 1) { len = (ALIGN(sg->length, 16) - 16); From a10618f397062823a84cd4abedc51f46b9d4410f Mon Sep 17 00:00:00 2001 From: Thomas Bourgoin Date: Thu, 13 Jul 2023 17:15:16 +0200 Subject: [PATCH 059/149] crypto: stm32 - check request size and scatterlist size when using DMA. When we are sending the data to HASH with the DMA, we send all the data provided in the scatterlists of the request. But in some cases (ex : tcrypt performances tests), we should only send req->nbytes When iterating through the scatterlist we verify if it is the last scatterlist or if the number of bytes sent plus the data of the current scatterlist is superior of the total number of bytes to hash. 
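The stop condition described above can be written as a small predicate. The helper below is hypothetical and only meant to make the loop bound explicit; the patch itself open-codes the test:

  #include <linux/scatterlist.h>

  /* true when 'sg' must be the last entry pushed to the DMA: either it is
   * the final scatterlist entry, or the bytes already queued ('sent') plus
   * this entry reach the request size ('total', i.e. req->nbytes).
   */
  static bool stm32_hash_sg_is_final(struct scatterlist *sg,
                                     unsigned int sent, unsigned int total)
  {
          return sg_is_last(sg) || sent + sg->length >= total;
  }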
Signed-off-by: Thomas Bourgoin Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 519fb716acee..701995a72e57 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -659,8 +659,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); u32 *buffer = (void *)rctx->state.buffer; struct scatterlist sg[1], *tsg; - int err = 0, len = 0, reg, ncp = 0; - unsigned int i; + int err = 0, reg, ncp = 0; + unsigned int i, len = 0, bufcnt = 0; + bool is_last = false; rctx->sg = hdev->req->src; rctx->total = hdev->req->nbytes; @@ -681,7 +682,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) sg[0] = *tsg; len = sg->length; - if (sg_is_last(sg)) { + if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) { + sg->length = rctx->total - bufcnt; + is_last = true; if (hdev->dma_mode == 1) { len = (ALIGN(sg->length, 16) - 16); @@ -707,13 +710,15 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) return -ENOMEM; } - err = stm32_hash_xmit_dma(hdev, sg, len, - !sg_is_last(sg)); + err = stm32_hash_xmit_dma(hdev, sg, len, !is_last); + bufcnt += sg[0].length; dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE); if (err == -ENOMEM) return err; + if (is_last) + break; } if (hdev->dma_mode == 1) { From a4adfbc2544933ac12e7fbd50708290265546dbc Mon Sep 17 00:00:00 2001 From: Thomas Bourgoin Date: Thu, 13 Jul 2023 17:15:17 +0200 Subject: [PATCH 060/149] crypto: stm32 - fix MDMAT condition If IP has MDMAT support, set or reset the bit MDMAT in Control Register. Fixes: b56403a25af7 ("crypto: stm32/hash - Support Ux500 hash") Cc: stable@vger.kernel.org Reviewed-by: Linus Walleij Signed-off-by: Thomas Bourgoin Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 701995a72e57..a48e6a14da2e 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -544,7 +544,7 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, reg = stm32_hash_read(hdev, HASH_CR); - if (!hdev->pdata->has_mdmat) { + if (hdev->pdata->has_mdmat) { if (mdma) reg |= HASH_CR_MDMAT; else From 1e3b2e805587ab5aa3ecdfe6339ad120ceaef7d3 Mon Sep 17 00:00:00 2001 From: Thomas Bourgoin Date: Thu, 13 Jul 2023 17:15:18 +0200 Subject: [PATCH 061/149] crypto: stm32 - remove flag HASH_FLAGS_DMA_READY Remove flag HASH_FLAGS_DMA_READY as it can put the driver in a deadlock state. If the DMA automatically set the DCAL bit, the interrupt indicating the end of a computation can be raised before the DMA complete sequence. 
Signed-off-by: Thomas Bourgoin Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index a48e6a14da2e..88a186c3dd78 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -84,12 +84,11 @@ #define HASH_FLAGS_INIT BIT(0) #define HASH_FLAGS_OUTPUT_READY BIT(1) #define HASH_FLAGS_CPU BIT(2) -#define HASH_FLAGS_DMA_READY BIT(3) -#define HASH_FLAGS_DMA_ACTIVE BIT(4) -#define HASH_FLAGS_HMAC_INIT BIT(5) -#define HASH_FLAGS_HMAC_FINAL BIT(6) -#define HASH_FLAGS_HMAC_KEY BIT(7) -#define HASH_FLAGS_SHA3_MODE BIT(8) +#define HASH_FLAGS_DMA_ACTIVE BIT(3) +#define HASH_FLAGS_HMAC_INIT BIT(4) +#define HASH_FLAGS_HMAC_FINAL BIT(5) +#define HASH_FLAGS_HMAC_KEY BIT(6) +#define HASH_FLAGS_SHA3_MODE BIT(7) #define HASH_FLAGS_FINAL BIT(15) #define HASH_FLAGS_FINUP BIT(16) #define HASH_FLAGS_ALGO_MASK GENMASK(20, 17) @@ -585,8 +584,6 @@ static void stm32_hash_dma_callback(void *param) struct stm32_hash_dev *hdev = param; complete(&hdev->dma_completion); - - hdev->flags |= HASH_FLAGS_DMA_READY; } static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev) @@ -1241,11 +1238,9 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) hdev->flags &= ~HASH_FLAGS_OUTPUT_READY; goto finish; } - } else if (HASH_FLAGS_DMA_READY & hdev->flags) { - if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) { - hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE; - goto finish; - } + } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) { + hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE; + goto finish; } return IRQ_HANDLED; From 5cd4ed98cfb743cd8f363ccc674313c14c17acd5 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 14 Jul 2023 19:41:35 +0800 Subject: [PATCH 062/149] crypto: hisilicon/qm - flush all work before driver removed Before removing the driver, flush inter-function communication work, and subsequent communication work is not processed. This prevents communication threads from accessing released memory. Fixes: ("crypto: hisilicon/qm - enable PF and VFs communication") Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index edc6fd44e7ca..81c21ca40375 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -954,6 +954,11 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data) if (!val) return IRQ_NONE; + if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { + dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); + return IRQ_HANDLED; + } + schedule_work(&qm->cmd_process); return IRQ_HANDLED; @@ -2743,6 +2748,9 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) test_bit(QM_RESETTING, &qm->misc_ctl)) msleep(WAIT_PERIOD); + if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) + flush_work(&qm->cmd_process); + udelay(REMOVE_WAIT_DELAY); } EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); From 4b3ee3ff2dd66b45dd5ec374a95af06eb26d35ac Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 14 Jul 2023 19:41:36 +0800 Subject: [PATCH 063/149] crypto: hisilicon/qm - stop function and write data to memory When the system is shut down, the process is killed, but the accelerator device does not stop executing the tasks. 
If the accelerator device still accesses the memory and writes back data to the memory after the memory is reclaimed by the system, an NFE error may occur. Therefore, before the system is shut down, the driver needs to stop the device and write data back to the memory. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 14 ++++++++------ include/linux/hisi_acc_qm.h | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 81c21ca40375..fdff87c09aaf 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1002,7 +1002,7 @@ static void qm_reset_function(struct hisi_qm *qm) return; } - ret = hisi_qm_stop(qm, QM_FLR); + ret = hisi_qm_stop(qm, QM_DOWN); if (ret) { dev_err(dev, "failed to stop qm when reset function\n"); goto clear_bit; @@ -3251,7 +3251,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) } if (qm->status.stop_reason == QM_SOFT_RESET || - qm->status.stop_reason == QM_FLR) { + qm->status.stop_reason == QM_DOWN) { hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); ret = qm_stop_started_qp(qm); if (ret < 0) { @@ -4547,11 +4547,11 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF) qm_cmd_uninit(qm); - ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR); + ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN); if (ret) pci_err(pdev, "failed to stop vfs by pf in FLR.\n"); - ret = hisi_qm_stop(qm, QM_FLR); + ret = hisi_qm_stop(qm, QM_DOWN); if (ret) { pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); @@ -4649,9 +4649,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev) struct hisi_qm *qm = pci_get_drvdata(pdev); int ret; - ret = hisi_qm_stop(qm, QM_NORMAL); + ret = hisi_qm_stop(qm, QM_DOWN); if (ret) dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); + + hisi_qm_cache_wb(qm); } EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); @@ -4815,7 +4817,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) cmd = msg & QM_MB_CMD_DATA_MASK; switch (cmd) { case QM_PF_FLR_PREPARE: - qm_pf_reset_vf_process(qm, QM_FLR); + qm_pf_reset_vf_process(qm, QM_DOWN); break; case QM_PF_SRST_PREPARE: qm_pf_reset_vf_process(qm, QM_SOFT_RESET); diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index a7d54d4d41fd..39fbfb4be944 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -104,7 +104,7 @@ enum qm_stop_reason { QM_NORMAL, QM_SOFT_RESET, - QM_FLR, + QM_DOWN, }; enum qm_state { From b925a0cc87a1b950bc87ebf869e6d2dff0839e5f Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 14 Jul 2023 19:41:37 +0800 Subject: [PATCH 064/149] crypto: hisilicon/qm - increase device doorbell timeout When both the accelerator device and SMMU are busy, the processing time of the doorbell may be prolonged. As a result, the doorbell may timeout, especially in the sva scenario. Therefore, the doorbell timeout is increased. 
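The change itself is a single register write, issued both from hisi_qm_init() and from the resume path before any doorbell is rung (condensed from the hunks below):

  /* widen the doorbell timeout before the function starts ringing doorbells */
  writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);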
Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index fdff87c09aaf..a99fd589445c 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -88,6 +88,8 @@ #define QM_DB_PRIORITY_SHIFT_V1 48 #define QM_PAGE_SIZE 0x0034 #define QM_QP_DB_INTERVAL 0x10000 +#define QM_DB_TIMEOUT_CFG 0x100074 +#define QM_DB_TIMEOUT_SET 0x1fffff #define QM_MEM_START_INIT 0x100040 #define QM_MEM_INIT_DONE 0x100044 @@ -5381,6 +5383,8 @@ int hisi_qm_init(struct hisi_qm *qm) goto err_pci_init; if (qm->fun_type == QM_HW_PF) { + /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ + writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); qm_disable_clock_gate(qm); ret = qm_dev_mem_reset(qm); if (ret) { @@ -5548,6 +5552,8 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm) qm_cmd_init(qm); hisi_qm_dev_err_init(qm); + /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ + writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); qm_disable_clock_gate(qm); ret = qm_dev_mem_reset(qm); if (ret) From 391dde6e48ff84687395a0a4e84f7e1540301e4e Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 14 Jul 2023 19:41:38 +0800 Subject: [PATCH 065/149] crypto: hisilicon/hpre - enable sva error interrupt event Enable sva error interrupt event. When an error occurs on the sva module, the device reports an abnormal interrupt to the driver. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 5d0adfb54a34..39297ce70f44 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -209,7 +209,7 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, - {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE}, + {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE}, {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, @@ -275,6 +275,9 @@ static const struct hpre_hw_error hpre_hw_errors[] = { }, { .int_msk = BIT(23), .msg = "sva_fsm_timeout_int_set" + }, { + .int_msk = BIT(24), + .msg = "sva_int_set" }, { /* sentinel */ } From 28b776098379fb698702b972df159aeca30aa6e3 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 14 Jul 2023 22:01:04 -0700 Subject: [PATCH 066/149] crypto: x86/aesni - remove unused parameter to aes_set_key_common() The 'tfm' parameter to aes_set_key_common() is never used, so remove it. 
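After the change the helper takes only the expanded-key context, so callers pass the context pointer directly (taken from the hunks below):

  static int aes_set_key_common(struct crypto_aes_ctx *ctx,
                                const u8 *in_key, unsigned int key_len);

  /* e.g. the skcipher setkey callback */
  return aes_set_key_common(aes_ctx(crypto_skcipher_ctx(tfm)), key, len);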
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/aesni-intel_glue.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index c4eea7e746e7..39d6a62ac627 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -229,8 +229,7 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) return (struct crypto_aes_ctx *)ALIGN(addr, align); } -static int aes_set_key_common(struct crypto_tfm *tfm, - struct crypto_aes_ctx *ctx, +static int aes_set_key_common(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) { int err; @@ -253,7 +252,8 @@ static int aes_set_key_common(struct crypto_tfm *tfm, static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { - return aes_set_key_common(tfm, aes_ctx(crypto_tfm_ctx(tfm)), in_key, key_len); + return aes_set_key_common(aes_ctx(crypto_tfm_ctx(tfm)), in_key, + key_len); } static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -285,8 +285,7 @@ static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int len) { - return aes_set_key_common(crypto_skcipher_tfm(tfm), - aes_ctx(crypto_skcipher_ctx(tfm)), key, len); + return aes_set_key_common(aes_ctx(crypto_skcipher_ctx(tfm)), key, len); } static int ecb_encrypt(struct skcipher_request *req) @@ -627,8 +626,7 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); - return aes_set_key_common(crypto_aead_tfm(aead), - &ctx->aes_key_expanded, key, key_len) ?: + return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?: rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); } @@ -893,14 +891,13 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key, keylen /= 2; /* first half of xts-key is for crypt */ - err = aes_set_key_common(crypto_skcipher_tfm(tfm), aes_ctx(ctx->raw_crypt_ctx), - key, keylen); + err = aes_set_key_common(aes_ctx(ctx->raw_crypt_ctx), key, keylen); if (err) return err; /* second half of xts-key is for tweak */ - return aes_set_key_common(crypto_skcipher_tfm(tfm), aes_ctx(ctx->raw_tweak_ctx), - key + keylen, keylen); + return aes_set_key_common(aes_ctx(ctx->raw_tweak_ctx), key + keylen, + keylen); } static int xts_crypt(struct skcipher_request *req, bool encrypt) @@ -1150,8 +1147,7 @@ static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key, { struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead); - return aes_set_key_common(crypto_aead_tfm(aead), - &ctx->aes_key_expanded, key, key_len) ?: + return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?: rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); } From dd105461ad15ea930d88aec1e4fcfc1f3186da43 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Sun, 16 Jul 2023 16:11:12 +0200 Subject: [PATCH 067/149] hwrng: arm-smccc-trng - don't set drvdata Don't set drvdata, there's nobody who reads it. 
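The reasoning is the usual drvdata pairing: platform_set_drvdata() only earns its keep if another callback later fetches the pointer with platform_get_drvdata(). A generic, hypothetical sketch of that pattern (names invented, not from this driver):

	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct foo_priv { int dummy; };

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);

		if (!priv)
			return -ENOMEM;
		platform_set_drvdata(pdev, priv);	/* only useful because ... */
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		struct foo_priv *priv = platform_get_drvdata(pdev);

		/* ... teardown can read it back here */
		priv->dummy = 0;
		return 0;
	}

Since nothing in this driver reads the pointer back, the platform_set_drvdata() call is dead code and can go.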
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/arm_smccc_trng.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c index e34c3ea692b6..7e954341b09f 100644 --- a/drivers/char/hw_random/arm_smccc_trng.c +++ b/drivers/char/hw_random/arm_smccc_trng.c @@ -105,8 +105,6 @@ static int smccc_trng_probe(struct platform_device *pdev) trng->name = "smccc_trng"; trng->read = smccc_trng_read; - platform_set_drvdata(pdev, trng); - return devm_hwrng_register(&pdev->dev, trng); } From e22471c2331c984ba48e89acd00d0ba1f60a3ea7 Mon Sep 17 00:00:00 2001 From: Jia Jie Ho Date: Mon, 17 Jul 2023 12:03:02 +0800 Subject: [PATCH 068/149] crypto: starfive - Add AES skcipher and aead support Adding AES skcipher and aead support to Starfive crypto module. Skcipher modes of operation include ecb, cbc, ctr, ofb, cfb. Aead modes include ccm and gcm. v1->v2: - Add include interrupt.h to fix compile error. (Herbert) Co-developed-by: Huan Feng Signed-off-by: Huan Feng Signed-off-by: Jia Jie Ho Signed-off-by: Herbert Xu --- drivers/crypto/starfive/Kconfig | 2 + drivers/crypto/starfive/Makefile | 2 +- drivers/crypto/starfive/jh7110-aes.c | 1034 +++++++++++++++++++++++++ drivers/crypto/starfive/jh7110-cryp.c | 36 +- drivers/crypto/starfive/jh7110-cryp.h | 65 ++ 5 files changed, 1132 insertions(+), 7 deletions(-) create mode 100644 drivers/crypto/starfive/jh7110-aes.c diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig index df745fcb09df..2cb192502c1b 100644 --- a/drivers/crypto/starfive/Kconfig +++ b/drivers/crypto/starfive/Kconfig @@ -12,6 +12,8 @@ config CRYPTO_DEV_JH7110 select CRYPTO_SHA512 select CRYPTO_SM3_GENERIC select CRYPTO_RSA + select CRYPTO_AES + select CRYPTO_CCM help Support for StarFive JH7110 crypto hardware acceleration engine. 
This module provides acceleration for public key algo, diff --git a/drivers/crypto/starfive/Makefile b/drivers/crypto/starfive/Makefile index 98b01d2f1ccf..8c137afe58ad 100644 --- a/drivers/crypto/starfive/Makefile +++ b/drivers/crypto/starfive/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_JH7110) += jh7110-crypto.o -jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o +jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o jh7110-aes.o diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c new file mode 100644 index 000000000000..04dd7958054f --- /dev/null +++ b/drivers/crypto/starfive/jh7110-aes.c @@ -0,0 +1,1034 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * StarFive AES acceleration driver + * + * Copyright (c) 2022 StarFive Technology + */ + +#include +#include +#include +#include +#include +#include "jh7110-cryp.h" + +#define STARFIVE_AES_REGS_OFFSET 0x100 +#define STARFIVE_AES_AESDIO0R (STARFIVE_AES_REGS_OFFSET + 0x0) +#define STARFIVE_AES_KEY0 (STARFIVE_AES_REGS_OFFSET + 0x4) +#define STARFIVE_AES_KEY1 (STARFIVE_AES_REGS_OFFSET + 0x8) +#define STARFIVE_AES_KEY2 (STARFIVE_AES_REGS_OFFSET + 0xC) +#define STARFIVE_AES_KEY3 (STARFIVE_AES_REGS_OFFSET + 0x10) +#define STARFIVE_AES_KEY4 (STARFIVE_AES_REGS_OFFSET + 0x14) +#define STARFIVE_AES_KEY5 (STARFIVE_AES_REGS_OFFSET + 0x18) +#define STARFIVE_AES_KEY6 (STARFIVE_AES_REGS_OFFSET + 0x1C) +#define STARFIVE_AES_KEY7 (STARFIVE_AES_REGS_OFFSET + 0x20) +#define STARFIVE_AES_CSR (STARFIVE_AES_REGS_OFFSET + 0x24) +#define STARFIVE_AES_IV0 (STARFIVE_AES_REGS_OFFSET + 0x28) +#define STARFIVE_AES_IV1 (STARFIVE_AES_REGS_OFFSET + 0x2C) +#define STARFIVE_AES_IV2 (STARFIVE_AES_REGS_OFFSET + 0x30) +#define STARFIVE_AES_IV3 (STARFIVE_AES_REGS_OFFSET + 0x34) +#define STARFIVE_AES_NONCE0 (STARFIVE_AES_REGS_OFFSET + 0x3C) +#define STARFIVE_AES_NONCE1 (STARFIVE_AES_REGS_OFFSET + 0x40) +#define STARFIVE_AES_NONCE2 (STARFIVE_AES_REGS_OFFSET + 0x44) +#define STARFIVE_AES_NONCE3 (STARFIVE_AES_REGS_OFFSET + 0x48) +#define STARFIVE_AES_ALEN0 (STARFIVE_AES_REGS_OFFSET + 0x4C) +#define STARFIVE_AES_ALEN1 (STARFIVE_AES_REGS_OFFSET + 0x50) +#define STARFIVE_AES_MLEN0 (STARFIVE_AES_REGS_OFFSET + 0x54) +#define STARFIVE_AES_MLEN1 (STARFIVE_AES_REGS_OFFSET + 0x58) +#define STARFIVE_AES_IVLEN (STARFIVE_AES_REGS_OFFSET + 0x5C) + +#define FLG_MODE_MASK GENMASK(2, 0) +#define FLG_ENCRYPT BIT(4) + +/* Misc */ +#define CCM_B0_ADATA 0x40 +#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) + +static inline int starfive_aes_wait_busy(struct starfive_cryp_dev *cryp) +{ + u32 status; + + return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status, + !(status & STARFIVE_AES_BUSY), 10, 100000); +} + +static inline int starfive_aes_wait_keydone(struct starfive_cryp_dev *cryp) +{ + u32 status; + + return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status, + (status & STARFIVE_AES_KEY_DONE), 10, 100000); +} + +static inline int starfive_aes_wait_gcmdone(struct starfive_cryp_dev *cryp) +{ + u32 status; + + return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status, + (status & STARFIVE_AES_GCM_DONE), 10, 100000); +} + +static inline int is_gcm(struct starfive_cryp_dev *cryp) +{ + return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM; +} + +static inline int is_encrypt(struct starfive_cryp_dev *cryp) +{ + return cryp->flags & FLG_ENCRYPT; +} + +static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mode) +{ + struct 
starfive_cryp_dev *cryp = ctx->cryp; + unsigned int value; + + switch (hw_mode) { + case STARFIVE_AES_MODE_GCM: + value = readl(ctx->cryp->base + STARFIVE_AES_CSR); + value |= STARFIVE_AES_GCM_START; + writel(value, cryp->base + STARFIVE_AES_CSR); + starfive_aes_wait_gcmdone(cryp); + break; + case STARFIVE_AES_MODE_CCM: + value = readl(ctx->cryp->base + STARFIVE_AES_CSR); + value |= STARFIVE_AES_CCM_START; + writel(value, cryp->base + STARFIVE_AES_CSR); + break; + } +} + +static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + + if (is_gcm(cryp)) + writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN); + else + writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN); +} + +static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + + writel(upper_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN0); + writel(lower_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN1); +} + +static inline void starfive_aes_set_mlen(struct starfive_cryp_ctx *ctx) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + + writel(upper_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN0); + writel(lower_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN1); +} + +static inline int starfive_aes_ccm_check_iv(const u8 *iv) +{ + /* 2 <= L <= 8, so 1 <= L' <= 7. */ + if (iv[0] < 1 || iv[0] > 7) + return -EINVAL; + + return 0; +} + +static int starfive_aes_write_iv(struct starfive_cryp_ctx *ctx, u32 *iv) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + + writel(iv[0], cryp->base + STARFIVE_AES_IV0); + writel(iv[1], cryp->base + STARFIVE_AES_IV1); + writel(iv[2], cryp->base + STARFIVE_AES_IV2); + + if (is_gcm(cryp)) { + if (starfive_aes_wait_gcmdone(cryp)) + return -ETIMEDOUT; + + return 0; + } + + writel(iv[3], cryp->base + STARFIVE_AES_IV3); + + return 0; +} + +static inline void starfive_aes_get_iv(struct starfive_cryp_dev *cryp, u32 *iv) +{ + iv[0] = readl(cryp->base + STARFIVE_AES_IV0); + iv[1] = readl(cryp->base + STARFIVE_AES_IV1); + iv[2] = readl(cryp->base + STARFIVE_AES_IV2); + iv[3] = readl(cryp->base + STARFIVE_AES_IV3); +} + +static inline void starfive_aes_write_nonce(struct starfive_cryp_ctx *ctx, u32 *nonce) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + + writel(nonce[0], cryp->base + STARFIVE_AES_NONCE0); + writel(nonce[1], cryp->base + STARFIVE_AES_NONCE1); + writel(nonce[2], cryp->base + STARFIVE_AES_NONCE2); + writel(nonce[3], cryp->base + STARFIVE_AES_NONCE3); +} + +static int starfive_aes_write_key(struct starfive_cryp_ctx *ctx) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + u32 *key = (u32 *)ctx->key; + + if (ctx->keylen >= AES_KEYSIZE_128) { + writel(key[0], cryp->base + STARFIVE_AES_KEY0); + writel(key[1], cryp->base + STARFIVE_AES_KEY1); + writel(key[2], cryp->base + STARFIVE_AES_KEY2); + writel(key[3], cryp->base + STARFIVE_AES_KEY3); + } + + if (ctx->keylen >= AES_KEYSIZE_192) { + writel(key[4], cryp->base + STARFIVE_AES_KEY4); + writel(key[5], cryp->base + STARFIVE_AES_KEY5); + } + + if (ctx->keylen >= AES_KEYSIZE_256) { + writel(key[6], cryp->base + STARFIVE_AES_KEY6); + writel(key[7], cryp->base + STARFIVE_AES_KEY7); + } + + if (starfive_aes_wait_keydone(cryp)) + return -ETIMEDOUT; + + return 0; +} + +static int starfive_aes_ccm_init(struct starfive_cryp_ctx *ctx) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE]; + unsigned int textlen; + + memcpy(iv, cryp->req.areq->iv, AES_BLOCK_SIZE); + 
memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); + + /* Build B0 */ + memcpy(b0, iv, AES_BLOCK_SIZE); + + b0[0] |= (8 * ((cryp->authsize - 2) / 2)); + + if (cryp->assoclen) + b0[0] |= CCM_B0_ADATA; + + textlen = cryp->total_in; + + b0[AES_BLOCK_SIZE - 2] = textlen >> 8; + b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF; + + starfive_aes_write_nonce(ctx, (u32 *)b0); + + return 0; +} + +static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx) +{ + struct starfive_cryp_request_ctx *rctx = ctx->rctx; + struct starfive_cryp_dev *cryp = ctx->cryp; + u32 hw_mode; + + /* reset */ + rctx->csr.aes.v = 0; + rctx->csr.aes.aesrst = 1; + writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR); + + /* csr setup */ + hw_mode = cryp->flags & FLG_MODE_MASK; + + rctx->csr.aes.v = 0; + + switch (ctx->keylen) { + case AES_KEYSIZE_128: + rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_128; + break; + case AES_KEYSIZE_192: + rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_192; + break; + case AES_KEYSIZE_256: + rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_256; + break; + } + + rctx->csr.aes.mode = hw_mode; + rctx->csr.aes.cmode = !is_encrypt(cryp); + rctx->csr.aes.ie = 1; + + if (hw_mode == STARFIVE_AES_MODE_CFB || + hw_mode == STARFIVE_AES_MODE_OFB) + rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_128; + else + rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1; + + if (cryp->side_chan) { + rctx->csr.aes.delay_aes = 1; + rctx->csr.aes.vaes_start = 1; + } + + writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR); + + cryp->err = starfive_aes_write_key(ctx); + if (cryp->err) + return cryp->err; + + switch (hw_mode) { + case STARFIVE_AES_MODE_GCM: + starfive_aes_set_alen(ctx); + starfive_aes_set_mlen(ctx); + starfive_aes_set_ivlen(ctx); + starfive_aes_aead_hw_start(ctx, hw_mode); + starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv); + break; + case STARFIVE_AES_MODE_CCM: + starfive_aes_set_alen(ctx); + starfive_aes_set_mlen(ctx); + starfive_aes_ccm_init(ctx); + starfive_aes_aead_hw_start(ctx, hw_mode); + break; + case STARFIVE_AES_MODE_OFB: + case STARFIVE_AES_MODE_CFB: + case STARFIVE_AES_MODE_CBC: + case STARFIVE_AES_MODE_CTR: + starfive_aes_write_iv(ctx, (void *)cryp->req.sreq->iv); + break; + default: + break; + } + + return cryp->err; +} + +static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp) +{ + int i, start_addr; + + if (starfive_aes_wait_busy(cryp)) + return dev_err_probe(cryp->dev, -ETIMEDOUT, + "Timeout waiting for tag generation."); + + start_addr = STARFIVE_AES_NONCE0; + + if (is_gcm(cryp)) + for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4) + cryp->tag_out[i] = readl(cryp->base + start_addr); + else + for (i = 0; i < AES_BLOCK_32; i++) + cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R); + + if (is_encrypt(cryp)) { + scatterwalk_copychunks(cryp->tag_out, &cryp->out_walk, cryp->authsize, 1); + } else { + scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0); + + if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize)) + return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n"); + } + + return 0; +} + +static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp) +{ + union starfive_aes_csr csr; + int err = cryp->err; + + if (!err && cryp->authsize) + err = starfive_aes_read_authtag(cryp); + + if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC || + (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR)) + starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv); + + /* reset irq flags*/ + csr.v = 0; + csr.aesrst = 1; + 
writel(csr.v, cryp->base + STARFIVE_AES_CSR); + + if (cryp->authsize) + crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err); + else + crypto_finalize_skcipher_request(cryp->engine, cryp->req.sreq, + err); +} + +void starfive_aes_done_task(unsigned long param) +{ + struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param; + u32 block[AES_BLOCK_32]; + u32 stat; + int i; + + for (i = 0; i < AES_BLOCK_32; i++) + block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R); + + scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE, + cryp->total_out), 1); + + cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out); + + if (!cryp->total_out) { + starfive_aes_finish_req(cryp); + return; + } + + memset(block, 0, AES_BLOCK_SIZE); + scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, + cryp->total_in), 0); + cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); + + for (i = 0; i < AES_BLOCK_32; i++) + writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); + + stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); + stat &= ~STARFIVE_IE_MASK_AES_DONE; + writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); +} + +static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + struct starfive_cryp_request_ctx *rctx = ctx->rctx; + u32 *buffer; + int total_len, loop; + + total_len = ALIGN(cryp->assoclen, AES_BLOCK_SIZE) / sizeof(unsigned int); + buffer = (u32 *)rctx->adata; + + for (loop = 0; loop < total_len; loop += 4) { + writel(*buffer, cryp->base + STARFIVE_AES_NONCE0); + buffer++; + writel(*buffer, cryp->base + STARFIVE_AES_NONCE1); + buffer++; + writel(*buffer, cryp->base + STARFIVE_AES_NONCE2); + buffer++; + writel(*buffer, cryp->base + STARFIVE_AES_NONCE3); + buffer++; + } + + if (starfive_aes_wait_gcmdone(cryp)) + return dev_err_probe(cryp->dev, -ETIMEDOUT, + "Timeout processing gcm aad block"); + + return 0; +} + +static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx) +{ + struct starfive_cryp_dev *cryp = ctx->cryp; + struct starfive_cryp_request_ctx *rctx = ctx->rctx; + u32 *buffer; + u8 *ci; + int total_len, loop; + + total_len = cryp->assoclen; + + ci = rctx->adata; + writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R); + ci++; + writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R); + ci++; + total_len -= 2; + buffer = (u32 *)ci; + + for (loop = 0; loop < 3; loop++, buffer++) + writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R); + + total_len -= 12; + + while (total_len > 0) { + for (loop = 0; loop < AES_BLOCK_32; loop++, buffer++) + writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R); + + total_len -= AES_BLOCK_SIZE; + } + + if (starfive_aes_wait_busy(cryp)) + return dev_err_probe(cryp->dev, -ETIMEDOUT, + "Timeout processing ccm aad block"); + + return 0; +} + +static int starfive_aes_prepare_req(struct skcipher_request *req, + struct aead_request *areq) +{ + struct starfive_cryp_ctx *ctx; + struct starfive_cryp_request_ctx *rctx; + struct starfive_cryp_dev *cryp; + + if (!req && !areq) + return -EINVAL; + + ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) : + crypto_aead_ctx(crypto_aead_reqtfm(areq)); + + cryp = ctx->cryp; + rctx = req ? 
skcipher_request_ctx(req) : aead_request_ctx(areq); + + if (req) { + cryp->req.sreq = req; + cryp->total_in = req->cryptlen; + cryp->total_out = req->cryptlen; + cryp->assoclen = 0; + cryp->authsize = 0; + } else { + cryp->req.areq = areq; + cryp->assoclen = areq->assoclen; + cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); + if (is_encrypt(cryp)) { + cryp->total_in = areq->cryptlen; + cryp->total_out = areq->cryptlen; + } else { + cryp->total_in = areq->cryptlen - cryp->authsize; + cryp->total_out = cryp->total_in; + } + } + + rctx->in_sg = req ? req->src : areq->src; + scatterwalk_start(&cryp->in_walk, rctx->in_sg); + + rctx->out_sg = req ? req->dst : areq->dst; + scatterwalk_start(&cryp->out_walk, rctx->out_sg); + + if (cryp->assoclen) { + rctx->adata = kzalloc(ALIGN(cryp->assoclen, AES_BLOCK_SIZE), GFP_KERNEL); + if (IS_ERR(rctx->adata)) + return dev_err_probe(cryp->dev, PTR_ERR(rctx->adata), + "Failed to alloc memory for adata"); + + scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0); + scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2); + } + + ctx->rctx = rctx; + + return starfive_aes_hw_init(ctx); +} + +static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct skcipher_request *req = + container_of(areq, struct skcipher_request, base); + struct starfive_cryp_ctx *ctx = + crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); + struct starfive_cryp_dev *cryp = ctx->cryp; + u32 block[AES_BLOCK_32]; + u32 stat; + int i; + + /* + * Write first plain/ciphertext block to start the module + * then let irq tasklet handle the rest of the data blocks. + */ + scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, + cryp->total_in), 0); + cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); + + for (i = 0; i < AES_BLOCK_32; i++) + writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); + + stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); + stat &= ~STARFIVE_IE_MASK_AES_DONE; + writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); + + return 0; +} + +static int starfive_aes_skcipher_prepare_req(struct crypto_engine *engine, + void *areq) +{ + struct skcipher_request *req = + container_of(areq, struct skcipher_request, base); + + return starfive_aes_prepare_req(req, NULL); +} + +static int starfive_aes_init_tfm(struct crypto_skcipher *tfm) +{ + struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->cryp = starfive_cryp_find_dev(ctx); + if (!ctx->cryp) + return -ENODEV; + + crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) + + sizeof(struct skcipher_request)); + + ctx->enginectx.op.do_one_request = starfive_aes_do_one_req; + ctx->enginectx.op.prepare_request = starfive_aes_skcipher_prepare_req; + ctx->enginectx.op.unprepare_request = NULL; + + return 0; +} + +static void starfive_aes_exit_tfm(struct crypto_skcipher *tfm) +{ + struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->enginectx.op.do_one_request = NULL; + ctx->enginectx.op.prepare_request = NULL; + ctx->enginectx.op.unprepare_request = NULL; +} + +static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = + container_of(areq, struct aead_request, base); + struct starfive_cryp_ctx *ctx = + crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct starfive_cryp_dev *cryp = ctx->cryp; + struct starfive_cryp_request_ctx *rctx = ctx->rctx; + u32 block[AES_BLOCK_32]; + u32 stat; + int i; + + if (!cryp->assoclen) + goto write_text; + + if 
((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM) + cryp->err = starfive_aes_ccm_write_adata(ctx); + else + cryp->err = starfive_aes_gcm_write_adata(ctx); + + kfree(rctx->adata); + + if (cryp->err) + return cryp->err; + +write_text: + if (!cryp->total_in) + goto finish_req; + + /* + * Write first plain/ciphertext block to start the module + * then let irq tasklet handle the rest of the data blocks. + */ + scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, + cryp->total_in), 0); + cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); + + for (i = 0; i < AES_BLOCK_32; i++) + writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); + + stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); + stat &= ~STARFIVE_IE_MASK_AES_DONE; + writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); + + return 0; + +finish_req: + starfive_aes_finish_req(cryp); + return 0; +} + +static int starfive_aes_aead_prepare_req(struct crypto_engine *engine, void *areq) +{ + struct aead_request *req = + container_of(areq, struct aead_request, base); + + return starfive_aes_prepare_req(NULL, req); +} + +static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm) +{ + struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); + struct starfive_cryp_dev *cryp = ctx->cryp; + struct crypto_tfm *aead = crypto_aead_tfm(tfm); + struct crypto_alg *alg = aead->__crt_alg; + + ctx->cryp = starfive_cryp_find_dev(ctx); + if (!ctx->cryp) + return -ENODEV; + + if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { + ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->aead_fbk)) + return dev_err_probe(cryp->dev, PTR_ERR(ctx->aead_fbk), + "%s() failed to allocate fallback for %s\n", + __func__, alg->cra_name); + } + + crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) + + sizeof(struct aead_request)); + + ctx->enginectx.op.do_one_request = starfive_aes_aead_do_one_req; + ctx->enginectx.op.prepare_request = starfive_aes_aead_prepare_req; + ctx->enginectx.op.unprepare_request = NULL; + + return 0; +} + +static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm) +{ + struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); + + if (ctx->aead_fbk) { + crypto_free_aead(ctx->aead_fbk); + ctx->aead_fbk = NULL; + } + + ctx->enginectx.op.do_one_request = NULL; + ctx->enginectx.op.prepare_request = NULL; + ctx->enginectx.op.unprepare_request = NULL; +} + +static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct starfive_cryp_dev *cryp = ctx->cryp; + unsigned int blocksize_align = crypto_skcipher_blocksize(tfm) - 1; + + cryp->flags = flags; + + if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_ECB || + (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC) + if (req->cryptlen & blocksize_align) + return -EINVAL; + + return crypto_transfer_skcipher_request_to_engine(cryp->engine, req); +} + +static int starfive_aes_aead_crypt(struct aead_request *req, unsigned long flags) +{ + struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + struct starfive_cryp_dev *cryp = ctx->cryp; + + cryp->flags = flags; + + /* + * HW engine could not perform CCM tag verification on + * non-blocksize aligned text, use fallback algo instead + */ + if (ctx->aead_fbk && !is_encrypt(cryp)) { + struct aead_request *subreq = aead_request_ctx(req); + + aead_request_set_tfm(subreq, ctx->aead_fbk); + 
aead_request_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + aead_request_set_crypt(subreq, req->src, + req->dst, req->cryptlen, req->iv); + aead_request_set_ad(subreq, req->assoclen); + + return crypto_aead_decrypt(subreq); + } + + return crypto_transfer_aead_request_to_engine(cryp->engine, req); +} + +static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (!key || !keylen) + return -EINVAL; + + if (keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); + + if (!key || !keylen) + return -EINVAL; + + if (keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + if (ctx->aead_fbk) + return crypto_aead_setkey(ctx->aead_fbk, key, keylen); + + return 0; +} + +static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + return crypto_gcm_check_authsize(authsize); +} + +static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); + + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + + return crypto_aead_setauthsize(ctx->aead_fbk, authsize); +} + +static int starfive_aes_ecb_encrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB | FLG_ENCRYPT); +} + +static int starfive_aes_ecb_decrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB); +} + +static int starfive_aes_cbc_encrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC | FLG_ENCRYPT); +} + +static int starfive_aes_cbc_decrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC); +} + +static int starfive_aes_cfb_encrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB | FLG_ENCRYPT); +} + +static int starfive_aes_cfb_decrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB); +} + +static int starfive_aes_ofb_encrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB | FLG_ENCRYPT); +} + +static int starfive_aes_ofb_decrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB); +} + +static int starfive_aes_ctr_encrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR | FLG_ENCRYPT); +} + +static int starfive_aes_ctr_decrypt(struct skcipher_request *req) +{ + return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR); +} + +static int starfive_aes_gcm_encrypt(struct aead_request *req) +{ + return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM | FLG_ENCRYPT); +} + +static int starfive_aes_gcm_decrypt(struct aead_request *req) +{ + return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM); +} + +static int starfive_aes_ccm_encrypt(struct aead_request *req) +{ + int ret; + + ret = starfive_aes_ccm_check_iv(req->iv); + if (ret) + return ret; + + return 
starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM | FLG_ENCRYPT); +} + +static int starfive_aes_ccm_decrypt(struct aead_request *req) +{ + int ret; + + ret = starfive_aes_ccm_check_iv(req->iv); + if (ret) + return ret; + + return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM); +} + +static struct skcipher_alg skcipher_algs[] = { +{ + .init = starfive_aes_init_tfm, + .exit = starfive_aes_exit_tfm, + .setkey = starfive_aes_setkey, + .encrypt = starfive_aes_ecb_encrypt, + .decrypt = starfive_aes_ecb_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "starfive-ecb-aes", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct starfive_cryp_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, { + .init = starfive_aes_init_tfm, + .exit = starfive_aes_exit_tfm, + .setkey = starfive_aes_setkey, + .encrypt = starfive_aes_cbc_encrypt, + .decrypt = starfive_aes_cbc_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "starfive-cbc-aes", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct starfive_cryp_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, { + .init = starfive_aes_init_tfm, + .exit = starfive_aes_exit_tfm, + .setkey = starfive_aes_setkey, + .encrypt = starfive_aes_ctr_encrypt, + .decrypt = starfive_aes_ctr_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "starfive-ctr-aes", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct starfive_cryp_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, { + .init = starfive_aes_init_tfm, + .exit = starfive_aes_exit_tfm, + .setkey = starfive_aes_setkey, + .encrypt = starfive_aes_cfb_encrypt, + .decrypt = starfive_aes_cfb_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cfb(aes)", + .cra_driver_name = "starfive-cfb-aes", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct starfive_cryp_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, { + .init = starfive_aes_init_tfm, + .exit = starfive_aes_exit_tfm, + .setkey = starfive_aes_setkey, + .encrypt = starfive_aes_ofb_encrypt, + .decrypt = starfive_aes_ofb_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ofb(aes)", + .cra_driver_name = "starfive-ofb-aes", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct starfive_cryp_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, +}; + +static struct aead_alg aead_algs[] = { +{ + .setkey = starfive_aes_aead_setkey, + .setauthsize = starfive_aes_gcm_setauthsize, + .encrypt = starfive_aes_gcm_encrypt, + .decrypt = starfive_aes_gcm_decrypt, + .init = starfive_aes_aead_init_tfm, + .exit = starfive_aes_aead_exit_tfm, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "starfive-gcm-aes", + .cra_priority = 200, + .cra_flags = 
CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct starfive_cryp_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, { + .setkey = starfive_aes_aead_setkey, + .setauthsize = starfive_aes_ccm_setauthsize, + .encrypt = starfive_aes_ccm_encrypt, + .decrypt = starfive_aes_ccm_decrypt, + .init = starfive_aes_aead_init_tfm, + .exit = starfive_aes_aead_exit_tfm, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "starfive-ccm-aes", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct starfive_cryp_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}, +}; + +int starfive_aes_register_algs(void) +{ + int ret; + + ret = crypto_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); + if (ret) + return ret; + + ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + if (ret) + crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); + + return ret; +} + +void starfive_aes_unregister_algs(void) +{ + crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); +} diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c index e573c097a4a0..ab37010ceb88 100644 --- a/drivers/crypto/starfive/jh7110-cryp.c +++ b/drivers/crypto/starfive/jh7110-cryp.c @@ -51,6 +51,13 @@ struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx) return cryp; } +static u16 side_chan; +module_param(side_chan, ushort, 0); +MODULE_PARM_DESC(side_chan, "Enable side channel mitigation for AES module.\n" + "Enabling this feature will reduce speed performance.\n" + " 0 - Disabled\n" + " other - Enabled"); + static int starfive_dma_init(struct starfive_cryp_dev *cryp) { dma_cap_mask_t mask; @@ -82,20 +89,26 @@ static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp) static irqreturn_t starfive_cryp_irq(int irq, void *priv) { u32 status; + u32 mask; struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv; + mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET); + if (status & STARFIVE_IE_FLAG_AES_DONE) { + mask |= STARFIVE_IE_MASK_AES_DONE; + writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET); + tasklet_schedule(&cryp->aes_done); + } + if (status & STARFIVE_IE_FLAG_HASH_DONE) { - status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); - status |= STARFIVE_IE_MASK_HASH_DONE; - writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET); + mask |= STARFIVE_IE_MASK_HASH_DONE; + writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET); tasklet_schedule(&cryp->hash_done); } if (status & STARFIVE_IE_FLAG_PKA_DONE) { - status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); - status |= STARFIVE_IE_MASK_PKA_DONE; - writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET); + mask |= STARFIVE_IE_MASK_PKA_DONE; + writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET); complete(&cryp->pka_done); } @@ -121,10 +134,12 @@ static int starfive_cryp_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base), "Error remapping memory for platform device\n"); + tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp); tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp); cryp->phys_base = res->start; cryp->dma_maxburst = 32; + cryp->side_chan = side_chan; cryp->hclk = 
devm_clk_get(&pdev->dev, "hclk"); if (IS_ERR(cryp->hclk)) @@ -180,6 +195,10 @@ static int starfive_cryp_probe(struct platform_device *pdev) if (ret) goto err_engine_start; + ret = starfive_aes_register_algs(); + if (ret) + goto err_algs_aes; + ret = starfive_hash_register_algs(); if (ret) goto err_algs_hash; @@ -193,6 +212,8 @@ static int starfive_cryp_probe(struct platform_device *pdev) err_algs_rsa: starfive_hash_unregister_algs(); err_algs_hash: + starfive_aes_unregister_algs(); +err_algs_aes: crypto_engine_stop(cryp->engine); err_engine_start: crypto_engine_exit(cryp->engine); @@ -207,6 +228,7 @@ err_dma_init: clk_disable_unprepare(cryp->ahb); reset_control_assert(cryp->rst); + tasklet_kill(&cryp->aes_done); tasklet_kill(&cryp->hash_done); err_probe_defer: return ret; @@ -216,9 +238,11 @@ static void starfive_cryp_remove(struct platform_device *pdev) { struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev); + starfive_aes_unregister_algs(); starfive_hash_unregister_algs(); starfive_rsa_unregister_algs(); + tasklet_kill(&cryp->aes_done); tasklet_kill(&cryp->hash_done); crypto_engine_stop(cryp->engine); diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h index 0cdcffc0d7d4..b6d809e8fe45 100644 --- a/drivers/crypto/starfive/jh7110-cryp.h +++ b/drivers/crypto/starfive/jh7110-cryp.h @@ -5,7 +5,9 @@ #include #include #include +#include +#include #include #include #include @@ -17,13 +19,56 @@ #define STARFIVE_DMA_IN_LEN_OFFSET 0x10 #define STARFIVE_DMA_OUT_LEN_OFFSET 0x14 +#define STARFIVE_IE_MASK_AES_DONE 0x1 #define STARFIVE_IE_MASK_HASH_DONE 0x4 #define STARFIVE_IE_MASK_PKA_DONE 0x8 +#define STARFIVE_IE_FLAG_AES_DONE 0x1 #define STARFIVE_IE_FLAG_HASH_DONE 0x4 #define STARFIVE_IE_FLAG_PKA_DONE 0x8 #define STARFIVE_MSG_BUFFER_SIZE SZ_16K #define MAX_KEY_SIZE SHA512_BLOCK_SIZE +#define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE +#define STARFIVE_AES_CTR_LEN AES_BLOCK_SIZE + +union starfive_aes_csr { + u32 v; + struct { + u32 cmode :1; +#define STARFIVE_AES_KEYMODE_128 0x0 +#define STARFIVE_AES_KEYMODE_192 0x1 +#define STARFIVE_AES_KEYMODE_256 0x2 + u32 keymode :2; +#define STARFIVE_AES_BUSY BIT(3) + u32 busy :1; + u32 done :1; +#define STARFIVE_AES_KEY_DONE BIT(5) + u32 krdy :1; + u32 aesrst :1; + u32 ie :1; +#define STARFIVE_AES_CCM_START BIT(8) + u32 ccm_start :1; +#define STARFIVE_AES_MODE_ECB 0x0 +#define STARFIVE_AES_MODE_CBC 0x1 +#define STARFIVE_AES_MODE_CFB 0x2 +#define STARFIVE_AES_MODE_OFB 0x3 +#define STARFIVE_AES_MODE_CTR 0x4 +#define STARFIVE_AES_MODE_CCM 0x5 +#define STARFIVE_AES_MODE_GCM 0x6 + u32 mode :3; +#define STARFIVE_AES_GCM_START BIT(12) + u32 gcm_start :1; +#define STARFIVE_AES_GCM_DONE BIT(13) + u32 gcm_done :1; + u32 delay_aes :1; + u32 vaes_start :1; + u32 rsvd_0 :8; +#define STARFIVE_AES_MODE_XFB_1 0x0 +#define STARFIVE_AES_MODE_XFB_128 0x5 + u32 stmode :3; + u32 rsvd_1 :5; + }; +}; union starfive_hash_csr { u32 v; @@ -116,6 +161,7 @@ struct starfive_cryp_ctx { struct starfive_rsa_key rsa_key; struct crypto_akcipher *akcipher_fbk; struct crypto_ahash *ahash_fbk; + struct crypto_aead *aead_fbk; }; struct starfive_cryp_dev { @@ -133,13 +179,26 @@ struct starfive_cryp_dev { struct dma_chan *rx; struct dma_slave_config cfg_in; struct dma_slave_config cfg_out; + struct scatter_walk in_walk; + struct scatter_walk out_walk; struct crypto_engine *engine; + struct tasklet_struct aes_done; struct tasklet_struct hash_done; struct completion pka_done; + size_t assoclen; + size_t total_in; + size_t total_out; + u32 tag_in[4]; + u32 
tag_out[4]; + unsigned int authsize; + unsigned long flags; int err; + bool side_chan; union starfive_alg_cr alg_cr; union { struct ahash_request *hreq; + struct aead_request *areq; + struct skcipher_request *sreq; } req; }; @@ -147,6 +206,7 @@ struct starfive_cryp_request_ctx { union { union starfive_hash_csr hash; union starfive_pka_cacr pka; + union starfive_aes_csr aes; } csr; struct scatterlist *in_sg; @@ -157,6 +217,7 @@ struct starfive_cryp_request_ctx { unsigned int blksize; unsigned int digsize; unsigned long in_sg_len; + unsigned char *adata; u8 rsa_data[] __aligned(sizeof(u32)); }; @@ -168,5 +229,9 @@ void starfive_hash_unregister_algs(void); int starfive_rsa_register_algs(void); void starfive_rsa_unregister_algs(void); +int starfive_aes_register_algs(void); +void starfive_aes_unregister_algs(void); + void starfive_hash_done_task(unsigned long param); +void starfive_aes_done_task(unsigned long param); #endif From 9f3fa6bc4ff8515da1349c44a77e7327bd2f4788 Mon Sep 17 00:00:00 2001 From: Mahmoud Adam Date: Mon, 17 Jul 2023 12:55:09 +0000 Subject: [PATCH 069/149] KEYS: use kfree_sensitive with key The key might contain the private part of the key, so it is better to use kfree_sensitive() to free it. Signed-off-by: Mahmoud Adam Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/public_key.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 773e159dbbcb..abeecb8329b3 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -42,7 +42,7 @@ static void public_key_describe(const struct key *asymmetric_key, void public_key_free(struct public_key *key) { if (key) { - kfree(key->key); + kfree_sensitive(key->key); kfree(key->params); kfree(key); } @@ -263,7 +263,7 @@ error_free_tfm: else crypto_free_akcipher(tfm); error_free_key: - kfree(key); + kfree_sensitive(key); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } @@ -369,7 +369,7 @@ error_free_tfm: else crypto_free_akcipher(tfm); error_free_key: - kfree(key); + kfree_sensitive(key); pr_devel("<==%s() = %d\n", __func__, ret); return ret; } @@ -441,7 +441,7 @@ int public_key_verify_signature(const struct public_key *pkey, sig->digest, sig->digest_size); error_free_key: - kfree(key); + kfree_sensitive(key); error_free_tfm: crypto_free_sig(tfm); pr_devel("<==%s() = %d\n", __func__, ret); From 80e40fea8e2a7528b079ac1853da528c5cabf625 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Wed, 19 Jul 2023 09:18:03 +0200 Subject: [PATCH 070/149] hwrng: cctrng - don't open code init and exit functions Do not open code the init and exit functions of the cctrng driver. If we move the BUILD_BUG_ON checks into the probe function, we can use module_platform_driver and make the code shorter.
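For reference, module_platform_driver() just generates the registration boilerplate that the driver used to spell out by hand; a simplified sketch of the macro's effect (the real expansion goes through module_driver()):

	module_platform_driver(cctrng_driver);

	/* behaves like: */
	static int __init cctrng_driver_init(void)
	{
		return platform_driver_register(&cctrng_driver);
	}
	module_init(cctrng_driver_init);

	static void __exit cctrng_driver_exit(void)
	{
		platform_driver_unregister(&cctrng_driver);
	}
	module_exit(cctrng_driver_exit);

Because no hand-written init function is left, the BUILD_BUG_ON checks need a new home, which is why they move into the probe function in the diff below.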
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/cctrng.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 302ffa354c2f..241da7e44675 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -492,6 +492,10 @@ static int cctrng_probe(struct platform_device *pdev) u32 val; int irq; + /* Compile time assertion checks */ + BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6); + BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0); + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; @@ -698,21 +702,7 @@ static struct platform_driver cctrng_driver = { .remove = cctrng_remove, }; -static int __init cctrng_mod_init(void) -{ - /* Compile time assertion checks */ - BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6); - BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0); - - return platform_driver_register(&cctrng_driver); -} -module_init(cctrng_mod_init); - -static void __exit cctrng_mod_exit(void) -{ - platform_driver_unregister(&cctrng_driver); -} -module_exit(cctrng_mod_exit); +module_platform_driver(cctrng_driver); /* Module description */ MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver"); From 8bcd9689384810bd1c610419563c827e23118e34 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Wed, 19 Jul 2023 09:18:04 +0200 Subject: [PATCH 071/149] hwrng: cctrng - let devres enable the clock Call devm_clk_get_optional_enabled in the cctrng driver. We don't have to disable and unprepare the clock any more in error paths or in the remove function. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/cctrng.c | 29 +++++------------------------ 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 241da7e44675..ac4a2fbc6a0f 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -460,30 +460,16 @@ static int cc_trng_clk_init(struct cctrng_drvdata *drvdata) { struct clk *clk; struct device *dev = &(drvdata->pdev->dev); - int rc = 0; - clk = devm_clk_get_optional(dev, NULL); + clk = devm_clk_get_optional_enabled(dev, NULL); if (IS_ERR(clk)) return dev_err_probe(dev, PTR_ERR(clk), - "Error getting clock\n"); + "Failed to get or enable the clock\n"); drvdata->clk = clk; - - rc = clk_prepare_enable(drvdata->clk); - if (rc) { - dev_err(dev, "Failed to enable clock\n"); - return rc; - } - return 0; } -static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata) -{ - clk_disable_unprepare(drvdata->clk); -} - - static int cctrng_probe(struct platform_device *pdev) { struct cctrng_drvdata *drvdata; @@ -545,7 +531,7 @@ static int cctrng_probe(struct platform_device *pdev) rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata); if (rc) { dev_err(dev, "Could not register to interrupt %d\n", irq); - goto post_clk_err; + return rc; } dev_dbg(dev, "Registered to IRQ: %d\n", irq); @@ -563,14 +549,14 @@ static int cctrng_probe(struct platform_device *pdev) rc = cc_trng_pm_init(drvdata); if (rc) { dev_err(dev, "cc_trng_pm_init failed\n"); - goto post_clk_err; + return rc; } /* increment device's usage counter */ rc = cc_trng_pm_get(dev); if (rc) { dev_err(dev, "cc_trng_pm_get returned %x\n", rc); - goto post_pm_err; + return rc; } /* set pending_hw to verify that HW won't be triggered from read */ @@ -597,9 +583,6 @@ static int cctrng_probe(struct 
platform_device *pdev) post_pm_err: cc_trng_pm_fini(drvdata); -post_clk_err: - cc_trng_clk_fini(drvdata); - return rc; } @@ -612,8 +595,6 @@ static int cctrng_remove(struct platform_device *pdev) cc_trng_pm_fini(drvdata); - cc_trng_clk_fini(drvdata); - dev_info(dev, "ARM cctrng device terminated\n"); return 0; From 80a34c037713b183032fad578cf5cb88b94298a8 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Wed, 19 Jul 2023 09:18:05 +0200 Subject: [PATCH 072/149] hwrng: cctrng - merge cc_trng_clk_init into its only caller cc_trng_clk_init is called only from the probe function. Merge the two functions, this saves some lines of code. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/cctrng.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index ac4a2fbc6a0f..2a1887f6483f 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -455,21 +455,6 @@ static void cc_trng_startwork_handler(struct work_struct *w) cc_trng_hw_trigger(drvdata); } - -static int cc_trng_clk_init(struct cctrng_drvdata *drvdata) -{ - struct clk *clk; - struct device *dev = &(drvdata->pdev->dev); - - clk = devm_clk_get_optional_enabled(dev, NULL); - if (IS_ERR(clk)) - return dev_err_probe(dev, PTR_ERR(clk), - "Failed to get or enable the clock\n"); - - drvdata->clk = clk; - return 0; -} - static int cctrng_probe(struct platform_device *pdev) { struct cctrng_drvdata *drvdata; @@ -517,11 +502,10 @@ static int cctrng_probe(struct platform_device *pdev) return rc; } - rc = cc_trng_clk_init(drvdata); - if (rc) { - dev_err(dev, "cc_trng_clk_init failed\n"); - return rc; - } + drvdata->clk = devm_clk_get_optional_enabled(dev, NULL); + if (IS_ERR(drvdata->clk)) + return dev_err_probe(dev, PTR_ERR(drvdata->clk), + "Failed to get or enable the clock\n"); INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler); INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler); From 9553ae3497f0753b79efa54b1719a7117e10bdf4 Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Wed, 19 Jul 2023 09:18:06 +0200 Subject: [PATCH 073/149] hwrng: cctrng - use dev_err_probe in error paths Use dev_err_probe in error paths to make the code a bit shorter. 
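The conversion applied throughout probe is the standard dev_err_probe() idiom: it folds the error message and the return value into one statement, and for -EPROBE_DEFER it logs at debug level and records the deferral reason instead of flooding the log. A sketch of the before/after shape, using the IRQ hunk from the diff below:

	/* before */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		return rc;
	}

	/* after */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc)
		return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq);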
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/cctrng.c | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 2a1887f6483f..1abbff04a015 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -485,10 +485,8 @@ static int cctrng_probe(struct platform_device *pdev) drvdata->circ.buf = (char *)drvdata->data_buf; drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(drvdata->cc_base)) { - dev_err(dev, "Failed to ioremap registers"); - return PTR_ERR(drvdata->cc_base); - } + if (IS_ERR(drvdata->cc_base)) + return dev_err_probe(dev, PTR_ERR(drvdata->cc_base), "Failed to ioremap registers"); /* Then IRQ */ irq = platform_get_irq(pdev, 0); @@ -497,10 +495,8 @@ static int cctrng_probe(struct platform_device *pdev) /* parse sampling rate from device tree */ rc = cc_trng_parse_sampling_ratio(drvdata); - if (rc) { - dev_err(dev, "Failed to get legal sampling ratio for rosc\n"); - return rc; - } + if (rc) + return dev_err_probe(dev, rc, "Failed to get legal sampling ratio for rosc\n"); drvdata->clk = devm_clk_get_optional_enabled(dev, NULL); if (IS_ERR(drvdata->clk)) @@ -513,10 +509,8 @@ static int cctrng_probe(struct platform_device *pdev) /* register the driver isr function */ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata); - if (rc) { - dev_err(dev, "Could not register to interrupt %d\n", irq); - return rc; - } + if (rc) + return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq); dev_dbg(dev, "Registered to IRQ: %d\n", irq); /* Clear all pending interrupts */ @@ -531,17 +525,13 @@ static int cctrng_probe(struct platform_device *pdev) /* init PM */ rc = cc_trng_pm_init(drvdata); - if (rc) { - dev_err(dev, "cc_trng_pm_init failed\n"); - return rc; - } + if (rc) + return dev_err_probe(dev, rc, "cc_trng_pm_init failed\n"); /* increment device's usage counter */ rc = cc_trng_pm_get(dev); - if (rc) { - dev_err(dev, "cc_trng_pm_get returned %x\n", rc); - return rc; - } + if (rc) + return dev_err_probe(dev, rc, "cc_trng_pm_get returned %x\n", rc); /* set pending_hw to verify that HW won't be triggered from read */ atomic_set(&drvdata->pending_hw, 1); From 5a3d66acf072ae78dfa06af712a42b24bef32c0f Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Fri, 21 Jul 2023 10:54:43 +0200 Subject: [PATCH 074/149] hwrng: cn10k - delete empty remove function The remove function is empty, we can delete it. It's ok for a PCI driver to have no remove function. 
Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/cn10k-rng.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c index 0cd7e1a8e499..794ec77feb2c 100644 --- a/drivers/char/hw_random/cn10k-rng.c +++ b/drivers/char/hw_random/cn10k-rng.c @@ -213,11 +213,6 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; } -static void cn10k_rng_remove(struct pci_dev *pdev) -{ - /* Nothing to do */ -} - static const struct pci_device_id cn10k_rng_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */ {0,}, @@ -229,7 +224,6 @@ static struct pci_driver cn10k_rng_driver = { .name = "cn10k_rng", .id_table = cn10k_rng_id_table, .probe = cn10k_rng_probe, - .remove = cn10k_rng_remove, }; module_pci_driver(cn10k_rng_driver); From 81511798bdfb1a61f97569436789721aace942fb Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Fri, 21 Jul 2023 10:54:44 +0200 Subject: [PATCH 075/149] hwrng: cn10k - use dev_err_probe Use dev_err_probe in error paths of the probe function, making the code a tiny bit simpler. Signed-off-by: Martin Kaiser Signed-off-by: Herbert Xu --- drivers/char/hw_random/cn10k-rng.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c index 794ec77feb2c..31935316a160 100644 --- a/drivers/char/hw_random/cn10k-rng.c +++ b/drivers/char/hw_random/cn10k-rng.c @@ -187,10 +187,8 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, rng); rng->reg_base = pcim_iomap(pdev, 0, 0); - if (!rng->reg_base) { - dev_err(&pdev->dev, "Error while mapping CSRs, exiting\n"); - return -ENOMEM; - } + if (!rng->reg_base) + return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n"); rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "cn10k-rng-%s", dev_name(&pdev->dev)); @@ -205,10 +203,8 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id) reset_rng_health_state(rng); err = devm_hwrng_register(&pdev->dev, &rng->ops); - if (err) { - dev_err(&pdev->dev, "Could not register hwrng device.\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "Could not register hwrng device.\n"); return 0; } From 6df04505f3b285f3bcfe81cf0461339b0f4ed41d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 22 Jul 2023 16:53:58 +0200 Subject: [PATCH 076/149] crypto: caam - Use struct_size() Use struct_size() instead of hand-writing it, when allocating a structure with a flex array. This is less verbose, more robust and more informative. Signed-off-by: Christophe JAILLET Reviewed-by: Gaurav Jain Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 80deb003f0a5..9e5924e24f2e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -708,9 +708,8 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; struct ahash_edesc *edesc; - unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); - edesc = kzalloc(sizeof(*edesc) + sg_size, flags); + edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); if (!edesc) { dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); return NULL; From 3de0152bf26ff0c5083ef831ba7676fc4c92e63a Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 22 Jul 2023 16:53:59 +0200 Subject: [PATCH 077/149] crypto: caam - Remove messages related to memory allocation failure On memory allocation failure, the call stack is already logged, so there is no need to explicitly log an extra message. Remove these messages, and simplify some code accordingly. Signed-off-by: Christophe JAILLET Reviewed-by: Gaurav Jain Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 9e5924e24f2e..641c3afd59ca 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -368,10 +368,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, int ret; desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL); - if (!desc) { - dev_err(jrdev, "unable to allocate key input memory\n"); + if (!desc) return -ENOMEM; - } init_job_desc(desc, 0); @@ -702,18 +700,14 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, int sg_num, u32 *sh_desc, dma_addr_t sh_desc_dma) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; struct ahash_edesc *edesc; edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); - if (!edesc) { - dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); + if (!edesc) return NULL; - } state->edesc = edesc; @@ -1908,10 +1902,8 @@ caam_hash_alloc(struct caam_hash_template *template, struct crypto_alg *alg; t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); - if (!t_alg) { - pr_err("failed to allocate t_alg\n"); + if (!t_alg) return ERR_PTR(-ENOMEM); - } t_alg->ahash_alg = template->template_ahash; halg = &t_alg->ahash_alg; From 9a6913feb46c601e0895fc9f89b715b90a4cbb87 Mon Sep 17 00:00:00 2001 From: Franck LENORMAND Date: Mon, 24 Jul 2023 08:52:29 +0200 Subject: [PATCH 078/149] crypto: caam - Change structure type representing DECO MID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The partid structure is not suitable for representing the DECO MID register. This patch replaces partid with masterid, which is more appropriate.
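Both structures describe the same pair of consecutive 32-bit registers, so the substitution is layout-neutral; only the field names now match what the DECOxLIODNR registers hold. A side-by-side sketch (partid copied from the lines removed below; the masterid fields are inferred from its existing regs.h definition and its use in caam_state_save(), so treat the exact comments as approximate):

	struct partid {			/* old, misleading names */
		u32 rsvd1;
		u32 pidr;		/* partition ID, DECO */
	};

	struct masterid {		/* reused for deco_mid[] instead */
		u32 liodn_ms;
		u32 liodn_ls;		/* LIODN for non-sequence and seq access */
	};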
Signed-off-by: Franck LENORMAND Signed-off-by: Horia Geantă Signed-off-by: Meenakshi Aggarwal Reviewed-by: Gaurav Jain Signed-off-by: Herbert Xu --- drivers/crypto/caam/regs.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 189e74c21f0c..0f87bd365582 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -459,12 +459,6 @@ struct masterid { u32 liodn_ls; /* LIODN for non-sequence and seq access */ }; -/* Partition ID for DMA configuration */ -struct partid { - u32 rsvd1; - u32 pidr; /* partition ID, DECO */ -}; - /* RNGB test mode (replicated twice in some configurations) */ /* Padded out to 0x100 */ struct rngtst { @@ -590,7 +584,7 @@ struct caam_ctrl { u32 deco_rsr; /* DECORSR - Deco Request Source */ u32 rsvd11; u32 deco_rq; /* DECORR - DECO Request */ - struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ + struct masterid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ u32 rsvd5[22]; /* DECO Availability/Reset Section 120-3ff */ From 322d74752c28a71fbca3650b98c21c58d25414a8 Mon Sep 17 00:00:00 2001 From: Horia Geanta Date: Mon, 24 Jul 2023 08:52:30 +0200 Subject: [PATCH 079/149] crypto: caam - add power management support Add support for suspend and resume operation for PM in CAAM driver. When the CAAM goes in suspend, the hardware is considered to do nothing. On some platforms, the power of the CAAM is not turned off so it keeps its configuration. On other platforms, it doesn't so it is necessary to save the state of the CAAM: - JRs MID - Address of input and output rings Signed-off-by: Horia Geanta Signed-off-by: Victoria Milhoan Signed-off-by: Dan Douglass Signed-off-by: Vipul Kumar Signed-off-by: Franck LENORMAND Signed-off-by: Meenakshi Aggarwal Reviewed-by: Gaurav Jain Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 106 +++++++++++++++++++ drivers/crypto/caam/intern.h | 25 ++++- drivers/crypto/caam/jr.c | 193 +++++++++++++++++++++++++++++++---- drivers/crypto/caam/regs.h | 3 +- 4 files changed, 306 insertions(+), 21 deletions(-) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index ff9ddbbca377..a7a4583107f4 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -740,6 +740,109 @@ static int caam_ctrl_rng_init(struct device *dev) return 0; } +/* Indicate if the internal state of the CAAM is lost during PM */ +static int caam_off_during_pm(void) +{ + bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6qp") || + of_machine_is_compatible("fsl,imx6dl"); + + return not_off_during_pm ? 
0 : 1; +} + +static void caam_state_save(struct device *dev) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); + struct caam_ctl_state *state = &ctrlpriv->state; + struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; + u32 deco_inst, jr_inst; + int i; + + state->mcr = rd_reg32(&ctrl->mcr); + state->scfgr = rd_reg32(&ctrl->scfgr); + + deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) & + CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT; + for (i = 0; i < deco_inst; i++) { + state->deco_mid[i].liodn_ms = + rd_reg32(&ctrl->deco_mid[i].liodn_ms); + state->deco_mid[i].liodn_ls = + rd_reg32(&ctrl->deco_mid[i].liodn_ls); + } + + jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) & + CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT; + for (i = 0; i < jr_inst; i++) { + state->jr_mid[i].liodn_ms = + rd_reg32(&ctrl->jr_mid[i].liodn_ms); + state->jr_mid[i].liodn_ls = + rd_reg32(&ctrl->jr_mid[i].liodn_ls); + } +} + +static void caam_state_restore(const struct device *dev) +{ + const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); + const struct caam_ctl_state *state = &ctrlpriv->state; + struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; + u32 deco_inst, jr_inst; + int i; + + wr_reg32(&ctrl->mcr, state->mcr); + wr_reg32(&ctrl->scfgr, state->scfgr); + + deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) & + CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT; + for (i = 0; i < deco_inst; i++) { + wr_reg32(&ctrl->deco_mid[i].liodn_ms, + state->deco_mid[i].liodn_ms); + wr_reg32(&ctrl->deco_mid[i].liodn_ls, + state->deco_mid[i].liodn_ls); + } + + jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) & + CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT; + for (i = 0; i < jr_inst; i++) { + wr_reg32(&ctrl->jr_mid[i].liodn_ms, + state->jr_mid[i].liodn_ms); + wr_reg32(&ctrl->jr_mid[i].liodn_ls, + state->jr_mid[i].liodn_ls); + } + + if (ctrlpriv->virt_en == 1) + clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START | + JRSTART_JR1_START | JRSTART_JR2_START | + JRSTART_JR3_START); +} + +static int caam_ctrl_suspend(struct device *dev) +{ + const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); + + if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) + caam_state_save(dev); + + return 0; +} + +static int caam_ctrl_resume(struct device *dev) +{ + struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); + int ret = 0; + + if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) { + caam_state_restore(dev); + + /* HW and rng will be reset so deinstantiation can be removed */ + devm_remove_action(dev, devm_deinstantiate_rng, dev); + ret = caam_ctrl_rng_init(dev); + } + + return ret; +} + +static SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume); + /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { @@ -771,6 +874,8 @@ static int caam_probe(struct platform_device *pdev) caam_imx = (bool)imx_soc_match; + ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm(); + if (imx_soc_match) { /* * Until Layerscape and i.MX OP-TEE get in sync, @@ -1033,6 +1138,7 @@ static struct platform_driver caam_driver = { .driver = { .name = "caam", .of_match_table = caam_match, + .pm = &caam_ctrl_pm_ops, }, .probe = caam_probe, }; diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index b4f7bf77f487..e51320150872 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -4,7 +4,7 @@ * Private/internal definitions between modules * * Copyright 2008-2011 Freescale Semiconductor, Inc. 
- * Copyright 2019 NXP + * Copyright 2019, 2023 NXP */ #ifndef INTERN_H @@ -47,6 +47,16 @@ struct caam_jrentry_info { u32 desc_size; /* Stored size for postprocessing, header derived */ }; +struct caam_jr_state { + dma_addr_t inpbusaddr; + dma_addr_t outbusaddr; +}; + +struct caam_jr_dequeue_params { + struct device *dev; + int enable_itr; +}; + /* Private sub-storage for a single JobR */ struct caam_drv_private_jr { struct list_head list_node; /* Job Ring device list */ @@ -54,6 +64,7 @@ struct caam_drv_private_jr { int ridx; struct caam_job_ring __iomem *rregs; /* JobR's register space */ struct tasklet_struct irqtask; + struct caam_jr_dequeue_params tasklet_params; int irq; /* One per queue */ bool hwrng; @@ -71,6 +82,15 @@ struct caam_drv_private_jr { int tail; /* entinfo (s/w ring) tail index */ void *outring; /* Base of output ring, DMA-safe */ struct crypto_engine *engine; + + struct caam_jr_state state; /* State of the JR during PM */ +}; + +struct caam_ctl_state { + struct masterid deco_mid[16]; + struct masterid jr_mid[4]; + u32 mcr; + u32 scfgr; }; /* @@ -116,6 +136,9 @@ struct caam_drv_private { struct dentry *ctl; /* controller dir */ struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; #endif + + int caam_off_during_pm; /* If the CAAM is reset after suspend */ + struct caam_ctl_state state; /* State of the CTL during PM */ }; #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 96dea5304d22..316180d26f8a 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -117,6 +117,23 @@ static int caam_jr_flush(struct device *dev) return caam_jr_stop_processing(dev, JRCR_RESET); } +/* The resume can be used after a park or a flush if CAAM has not been reset */ +static int caam_jr_restart_processing(struct device *dev) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) & + JRINT_ERR_HALT_MASK; + + /* Check that the flush/park is completed */ + if (halt_status != JRINT_ERR_HALT_COMPLETE) + return -1; + + /* Resume processing of jobs */ + clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE); + + return 0; +} + static int caam_reset_hw_jr(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); @@ -245,7 +262,8 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) static void caam_jr_dequeue(unsigned long devarg) { int hw_idx, sw_idx, i, head, tail; - struct device *dev = (struct device *)devarg; + struct caam_jr_dequeue_params *params = (void *)devarg; + struct device *dev = params->dev; struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); u32 *userdesc, userstatus; @@ -319,8 +337,9 @@ static void caam_jr_dequeue(unsigned long devarg) outring_used--; } - /* reenable / unmask IRQs */ - clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); + if (params->enable_itr) + /* reenable / unmask IRQs */ + clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); } /** @@ -470,6 +489,29 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, } EXPORT_SYMBOL(caam_jr_enqueue); +static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr, + dma_addr_t outbusaddr) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + + wr_reg64(&jrp->rregs->inpring_base, inpbusaddr); + wr_reg64(&jrp->rregs->outring_base, outbusaddr); + wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); + wr_reg32(&jrp->rregs->outring_size, 
JOBR_DEPTH); + + /* Select interrupt coalescing parameters */ + clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC | + (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | + (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); +} + +static void caam_jr_reset_index(struct caam_drv_private_jr *jrp) +{ + jrp->out_ring_read_index = 0; + jrp->head = 0; + jrp->tail = 0; +} + /* * Init JobR independent of platform property detection */ @@ -506,25 +548,16 @@ static int caam_jr_init(struct device *dev) jrp->entinfo[i].desc_addr_dma = !0; /* Setup rings */ - jrp->out_ring_read_index = 0; - jrp->head = 0; - jrp->tail = 0; - - wr_reg64(&jrp->rregs->inpring_base, inpbusaddr); - wr_reg64(&jrp->rregs->outring_base, outbusaddr); - wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); - wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH); - + caam_jr_reset_index(jrp); jrp->inpring_avail = JOBR_DEPTH; + caam_jr_init_hw(dev, inpbusaddr, outbusaddr); spin_lock_init(&jrp->inplock); - /* Select interrupt coalescing parameters */ - clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC | - (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | - (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); - - tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); + jrp->tasklet_params.dev = dev; + jrp->tasklet_params.enable_itr = 1; + tasklet_init(&jrp->irqtask, caam_jr_dequeue, + (unsigned long)&jrp->tasklet_params); /* Connect job ring interrupt handler. */ error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED, @@ -635,11 +668,134 @@ static int caam_jr_probe(struct platform_device *pdev) atomic_set(&jrpriv->tfm_count, 0); + device_init_wakeup(&pdev->dev, 1); + device_set_wakeup_enable(&pdev->dev, false); + register_algs(jrpriv, jrdev->parent); return 0; } +static void caam_jr_get_hw_state(struct device *dev) +{ + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); + + jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); + jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base); +} + +static int caam_jr_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev); + struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent); + struct caam_jr_dequeue_params suspend_params = { + .dev = dev, + .enable_itr = 0, + }; + + /* Remove the node from Physical JobR list maintained by driver */ + spin_lock(&driver_data.jr_alloc_lock); + list_del(&jrpriv->list_node); + spin_unlock(&driver_data.jr_alloc_lock); + + if (jrpriv->hwrng) + caam_rng_exit(dev->parent); + + if (ctrlpriv->caam_off_during_pm) { + int err; + + tasklet_disable(&jrpriv->irqtask); + + /* mask itr to call flush */ + clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK); + + /* Invalid job in process */ + err = caam_jr_flush(dev); + if (err) { + dev_err(dev, "Failed to flush\n"); + return err; + } + + /* Dequeing jobs flushed */ + caam_jr_dequeue((unsigned long)&suspend_params); + + /* Save state */ + caam_jr_get_hw_state(dev); + } else if (device_may_wakeup(&pdev->dev)) { + enable_irq_wake(jrpriv->irq); + } + + return 0; +} + +static int caam_jr_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev); + struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent); + + if (ctrlpriv->caam_off_during_pm) { + u64 inp_addr; + int err; + + /* + * Check if the CAAM has been resetted checking the address of + * the input ring + */ + inp_addr = 
rd_reg64(&jrpriv->rregs->inpring_base); + if (inp_addr != 0) { + /* JR still has some configuration */ + if (inp_addr == jrpriv->state.inpbusaddr) { + /* JR has not been resetted */ + err = caam_jr_restart_processing(dev); + if (err) { + dev_err(dev, + "Restart processing failed\n"); + return err; + } + + tasklet_enable(&jrpriv->irqtask); + + clrsetbits_32(&jrpriv->rregs->rconfig_lo, + JRCFG_IMSK, 0); + + goto add_jr; + } else if (ctrlpriv->optee_en) { + /* JR has been used by OPTEE, reset it */ + err = caam_reset_hw_jr(dev); + if (err) { + dev_err(dev, "Failed to reset JR\n"); + return err; + } + } else { + /* No explanation, return error */ + return -EIO; + } + } + + caam_jr_reset_index(jrpriv); + caam_jr_init_hw(dev, jrpriv->state.inpbusaddr, + jrpriv->state.outbusaddr); + + tasklet_enable(&jrpriv->irqtask); + } else if (device_may_wakeup(&pdev->dev)) { + disable_irq_wake(jrpriv->irq); + } + +add_jr: + spin_lock(&driver_data.jr_alloc_lock); + list_add_tail(&jrpriv->list_node, &driver_data.jr_list); + spin_unlock(&driver_data.jr_alloc_lock); + + if (jrpriv->hwrng) + jrpriv->hwrng = !caam_rng_init(dev->parent); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume); + static const struct of_device_id caam_jr_match[] = { { .compatible = "fsl,sec-v4.0-job-ring", @@ -655,6 +811,7 @@ static struct platform_driver caam_jr_driver = { .driver = { .name = "caam_jr", .of_match_table = caam_jr_match, + .pm = &caam_jr_pm_ops, }, .probe = caam_jr_probe, .remove = caam_jr_remove, diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 0f87bd365582..873df9de9890 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -584,8 +584,7 @@ struct caam_ctrl { u32 deco_rsr; /* DECORSR - Deco Request Source */ u32 rsvd11; u32 deco_rq; /* DECORR - DECO Request */ - struct masterid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */ - u32 rsvd5[22]; + struct masterid deco_mid[16]; /* DECOxLIODNR - 1 per DECO */ /* DECO Availability/Reset Section 120-3ff */ u32 deco_avail; /* DAR - DECO availability */ From 355bf65080399b0c1aba34368331802ce1998aef Mon Sep 17 00:00:00 2001 From: Wang Ming Date: Wed, 26 Jul 2023 20:04:41 +0800 Subject: [PATCH 080/149] crypto: atmel - Use dev_err_probe instead of dev_err It is possible that dma_request_chan will return EPROBE_DEFER, which means that dd->dev is not ready yet. In this case, dev_err(dd->dev), there will be no output. This patch fixes the bug. 
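As a rough illustration of the dev_err_probe() pattern used throughout this series (hypothetical probe helper, not the actual atmel-sha code): dev_err_probe() logs the message for ordinary errors, but for -EPROBE_DEFER it only records the deferral reason and stays silent, which is the behaviour wanted when dma_request_chan() asks for the probe to be retried later.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

static int demo_dma_init(struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		/* Logs for real errors, stays quiet on -EPROBE_DEFER,
		 * and always returns the error code to the caller. */
		return dev_err_probe(dev, PTR_ERR(chan),
				     "DMA channel is not available\n");

	/* ... configure and use chan ... */
	return 0;
}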
Signed-off-by: Wang Ming Reviewed-by: Ryan Wanner Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index f2031f934be9..54fec72dfba2 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2499,8 +2499,8 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd) { dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx"); if (IS_ERR(dd->dma_lch_in.chan)) { - dev_err(dd->dev, "DMA channel is not available\n"); - return PTR_ERR(dd->dma_lch_in.chan); + return dev_err_probe(dd->dev, PTR_ERR(dd->dma_lch_in.chan), + "DMA channel is not available\n"); } dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + From 6a4b8aa0a916b39a39175584c07222434fa6c6ef Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 26 Jul 2023 22:53:19 +0100 Subject: [PATCH 081/149] crypto: af_alg - Fix missing initialisation affecting gcm-aes-s390 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix af_alg_alloc_areq() to initialise areq->first_rsgl.sgl.sgt.sgl to point to the scatterlist array in areq->first_rsgl.sgl.sgl. Without this, the gcm-aes-s390 driver will oops when it tries to do gcm_walk_start() on req->dst because req->dst is set to the value of areq->first_rsgl.sgl.sgl by _aead_recvmsg() calling aead_request_set_crypt(). The problem comes if an empty ciphertext is passed: the loop in af_alg_get_rsgl() just passes straight out and doesn't set areq->first_rsgl up. This isn't a problem on x86_64 using gcmaes_crypt_by_sg() because, as far as I can tell, that ignores req->dst and only uses req->src[*]. [*] Is this a bug in aesni-intel_glue.c? The s390x oops looks something like: Unable to handle kernel pointer dereference in virtual kernel address space Failing address: 0000000a00000000 TEID: 0000000a00000803 Fault in home space mode while using kernel ASCE. AS:00000000a43a0007 R3:0000000000000024 Oops: 003b ilc:2 [#1] SMP ... Call Trace: [<000003ff7fc3d47e>] gcm_walk_start+0x16/0x28 [aes_s390] [<00000000a2a342f2>] crypto_aead_decrypt+0x9a/0xb8 [<00000000a2a60888>] aead_recvmsg+0x478/0x698 [<00000000a2e519a0>] sock_recvmsg+0x70/0xb0 [<00000000a2e51a56>] sock_read_iter+0x76/0xa0 [<00000000a273e066>] vfs_read+0x26e/0x2a8 [<00000000a273e8c4>] ksys_read+0xbc/0x100 [<00000000a311d808>] __do_syscall+0x1d0/0x1f8 [<00000000a312ff30>] system_call+0x70/0x98 Last Breaking-Event-Address: [<000003ff7fc3e6b4>] gcm_aes_crypt+0x104/0xa68 [aes_s390] Fixes: c1abe6f570af ("crypto: af_alg: Use extract_iter_to_sg() to create scatterlists") Reported-by: Ondrej Mosnáček Link: https://lore.kernel.org/r/CAAUqJDuRkHE8fPgZJGaKjUjd3QfGwzfumuJBmStPqBhubxyk_A@mail.gmail.com/ Signed-off-by: David Howells cc: Herbert Xu cc: Sven Schnelle cc: Harald Freudenberger cc: "David S. 
Miller" cc: Paolo Abeni cc: linux-crypto@vger.kernel.org cc: linux-s390@vger.kernel.org cc: regressions@lists.linux.dev Tested-by: Sven Schnelle Tested-by: Ondrej Mosnáček Signed-off-by: Herbert Xu --- crypto/af_alg.c | 1 + 1 file changed, 1 insertion(+) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 06b15b9f661c..9ee8575d3b1a 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1192,6 +1192,7 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, areq->areqlen = areqlen; areq->sk = sk; + areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl; areq->last_rsgl = NULL; INIT_LIST_HEAD(&areq->rsgl_list); areq->tsgl = NULL; From 0788257aeebee9b8413c1c29e7d2029329b1a82e Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 28 Jul 2023 07:48:27 -0600 Subject: [PATCH 082/149] hwrng: Explicitly include correct DT includes The DT of_device.h and of_platform.h date back to the separate of_platform_bus_type before it was merged into the regular platform bus. As part of that merge prepping Arm DT support 13 years ago, they "temporarily" include each other. They also include platform_device.h and of.h. As a result, there's a pretty much random mix of those include files used throughout the tree. In order to detangle these headers and replace the implicit includes with struct declarations, users need to explicitly include the correct includes. Signed-off-by: Rob Herring Signed-off-by: Herbert Xu --- drivers/char/hw_random/atmel-rng.c | 2 +- drivers/char/hw_random/bcm2835-rng.c | 3 +-- drivers/char/hw_random/ingenic-trng.c | 2 +- drivers/char/hw_random/iproc-rng200.c | 3 +-- drivers/char/hw_random/npcm-rng.c | 3 +-- drivers/char/hw_random/omap-rng.c | 2 -- drivers/char/hw_random/omap3-rom-rng.c | 1 - drivers/char/hw_random/pasemi-rng.c | 3 +-- drivers/char/hw_random/pic32-rng.c | 5 ++--- drivers/char/hw_random/stm32-rng.c | 3 ++- drivers/char/hw_random/xgene-rng.c | 5 ++--- drivers/char/hw_random/xiphera-trng.c | 1 - 12 files changed, 12 insertions(+), 21 deletions(-) diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index b8effe77d80f..a37367ebcbac 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index e98fcac578d6..e19b0f9f48b9 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -8,8 +8,7 @@ #include #include #include -#include -#include +#include #include #include #include diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c index 3967a8dbe967..1672320e7d3d 100644 --- a/drivers/char/hw_random/ingenic-trng.c +++ b/drivers/char/hw_random/ingenic-trng.c @@ -11,8 +11,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c index 06bc060534d8..34df3f0d3e45 100644 --- a/drivers/char/hw_random/iproc-rng200.c +++ b/drivers/char/hw_random/iproc-rng200.c @@ -12,8 +12,7 @@ #include #include #include -#include -#include +#include #include #include diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c index 9903d0357e06..8a304b754217 100644 --- a/drivers/char/hw_random/npcm-rng.c +++ b/drivers/char/hw_random/npcm-rng.c @@ -8,12 +8,11 @@ #include #include #include +#include #include #include #include -#include #include -#include #define 
NPCM_RNGCS_REG 0x00 /* Control and status register */ #define NPCM_RNGD_REG 0x04 /* Data register */ diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 00ff96703dd2..be03f76a2a80 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -26,8 +26,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index f06e4f95114f..18dc46b1b58e 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index 2498d4ef9fe2..6959d6edd44c 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c @@ -9,11 +9,10 @@ #include #include +#include #include #include #include -#include -#include #include #define SDCRNG_CTL_REG 0x00 diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index 1902f4389a3f..888e6f5cec1f 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -12,9 +12,8 @@ #include #include #include +#include #include -#include -#include #include #include @@ -110,7 +109,7 @@ static struct platform_driver pic32_rng_driver = { .probe = pic32_rng_probe, .driver = { .name = "pic32-rng", - .of_match_table = of_match_ptr(pic32_rng_of_match), + .of_match_table = pic32_rng_of_match, }, }; diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c index a6731cf0627a..efb6a9f9a11b 100644 --- a/drivers/char/hw_random/stm32-rng.c +++ b/drivers/char/hw_random/stm32-rng.c @@ -10,8 +10,9 @@ #include #include #include +#include #include -#include +#include #include #include #include diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index 7c8f3cb7c6af..c25bb169563d 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -15,9 +15,8 @@ #include #include #include -#include -#include -#include +#include +#include #include #define RNG_MAX_DATUM 4 diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c index 2a9fea72b2e0..2c586d1fe8a9 100644 --- a/drivers/char/hw_random/xiphera-trng.c +++ b/drivers/char/hw_random/xiphera-trng.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include From 1ce1cd8208ad6060e4fcf6e09068c8954687c127 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 28 Jul 2023 13:50:15 -0600 Subject: [PATCH 083/149] hwrng: Enable COMPILE_TEST for more drivers There's quite a few hwrng drivers which are easily enabled for COMPILE_TEST, so let's enable them. The dependency on HW_RANDOM is redundant, so drop that while we're here. 
Signed-off-by: Rob Herring Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 32 +++++++++++++++------------- drivers/char/hw_random/amd-rng.c | 1 + drivers/char/hw_random/ingenic-rng.c | 2 +- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 5d72b8da3c36..8de74dcfa18c 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -37,7 +37,7 @@ config HW_RANDOM_TIMERIOMEM config HW_RANDOM_INTEL tristate "Intel HW Random Number Generator support" - depends on (X86 || IA64) && PCI + depends on (X86 || IA64 || COMPILE_TEST) && PCI default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -50,7 +50,8 @@ config HW_RANDOM_INTEL config HW_RANDOM_AMD tristate "AMD HW Random Number Generator support" - depends on (X86 || PPC_MAPLE) && PCI + depends on (X86 || PPC_MAPLE || COMPILE_TEST) + depends on PCI && HAS_IOPORT_MAP default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -63,7 +64,7 @@ config HW_RANDOM_AMD config HW_RANDOM_ATMEL tristate "Atmel Random Number Generator support" - depends on (ARCH_AT91 || COMPILE_TEST) && HAVE_CLK && OF + depends on (ARCH_AT91 || COMPILE_TEST) default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -113,7 +114,8 @@ config HW_RANDOM_IPROC_RNG200 config HW_RANDOM_GEODE tristate "AMD Geode HW Random Number Generator support" - depends on X86_32 && PCI + depends on (X86_32 || COMPILE_TEST) + depends on PCI default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -205,7 +207,7 @@ config HW_RANDOM_OCTEON config HW_RANDOM_PASEMI tristate "PA Semi HW Random Number Generator support" - depends on PPC_PASEMI + depends on PPC_PASEMI || (PPC && COMPILE_TEST) default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -228,7 +230,7 @@ config HW_RANDOM_VIRTIO config HW_RANDOM_MXC_RNGA tristate "Freescale i.MX RNGA Random Number Generator" - depends on SOC_IMX31 + depends on SOC_IMX31 || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -241,7 +243,7 @@ config HW_RANDOM_MXC_RNGA config HW_RANDOM_IMX_RNGC tristate "Freescale i.MX RNGC Random Number Generator" - depends on HAS_IOMEM && HAVE_CLK + depends on HAS_IOMEM depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST default HW_RANDOM help @@ -256,8 +258,7 @@ config HW_RANDOM_IMX_RNGC config HW_RANDOM_INGENIC_RNG tristate "Ingenic Random Number Generator support" - depends on HW_RANDOM - depends on MACH_JZ4780 || MACH_X1000 + depends on MACH_JZ4780 || MACH_X1000 || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number Generator @@ -323,7 +324,7 @@ config HW_RANDOM_POWERNV config HW_RANDOM_HISI tristate "Hisilicon Random Number Generator support" - depends on HW_RANDOM && ARCH_HISI + depends on ARCH_HISI || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -347,7 +348,7 @@ config HW_RANDOM_HISTB config HW_RANDOM_ST tristate "ST Microelectronics HW Random Number Generator support" - depends on HW_RANDOM && (ARCH_STI || COMPILE_TEST) + depends on ARCH_STI || COMPILE_TEST help This driver provides kernel-side support for the Random Number Generator hardware found on STi series of SoCs. 
@@ -357,7 +358,7 @@ config HW_RANDOM_ST config HW_RANDOM_XGENE tristate "APM X-Gene True Random Number Generator (TRNG) support" - depends on HW_RANDOM && ARCH_XGENE + depends on ARCH_XGENE || COMPILE_TEST default HW_RANDOM help This driver provides kernel-side support for the Random Number @@ -370,7 +371,7 @@ config HW_RANDOM_XGENE config HW_RANDOM_STM32 tristate "STMicroelectronics STM32 random number generator" - depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST) + depends on ARCH_STM32 || COMPILE_TEST depends on HAS_IOMEM default HW_RANDOM help @@ -385,7 +386,7 @@ config HW_RANDOM_STM32 config HW_RANDOM_PIC32 tristate "Microchip PIC32 Random Number Generator support" depends on MACH_PIC32 || COMPILE_TEST - default y + default HW_RANDOM if MACH_PIC32 help This driver provides kernel-side support for the Random Number Generator hardware found on a PIC32. @@ -424,7 +425,8 @@ config HW_RANDOM_MESON config HW_RANDOM_CAVIUM tristate "Cavium ThunderX Random Number Generator support" - depends on HW_RANDOM && PCI && ARCH_THUNDER + depends on PCI + depends on ARCH_THUNDER || (ARM64 && COMPILE_TEST) default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 0555e3838bce..86162a13681e 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -26,6 +26,7 @@ #include #include +#include #include #include #include diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c index 055cfe59f519..4f18c3fa5427 100644 --- a/drivers/char/hw_random/ingenic-rng.c +++ b/drivers/char/hw_random/ingenic-rng.c @@ -95,7 +95,7 @@ static int ingenic_rng_probe(struct platform_device *pdev) return PTR_ERR(priv->base); } - priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev); + priv->version = (enum ingenic_rng_version)(uintptr_t)of_device_get_match_data(&pdev->dev); priv->rng.name = pdev->name; priv->rng.init = ingenic_rng_init; From b9a281f1f72b9786a4cb04b4105c121f3333aabf Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 4 Aug 2023 08:48:44 -0600 Subject: [PATCH 084/149] hwrng: xgene: Add explicit io.h include Commit 0788257aeebe ("hwrng: Explicitly include correct DT includes") removed an implicit include of io.h. On most architectures, there's still an implicit include of it, but not on s390. Enabling COMPILE_TEST in commit 1ce1cd8208ad ("hwrng: Enable COMPILE_TEST for more drivers") exposed this. Fixes: 0788257aeebe ("hwrng: Explicitly include correct DT includes") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202308042049.8R2tNRoo-lkp@intel.com/ Signed-off-by: Rob Herring Signed-off-by: Herbert Xu --- drivers/char/hw_random/xgene-rng.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index c25bb169563d..99f4e86ac3e9 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include From 33b53749aa1f9fb0f5aa0ac37269944a080b982f Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Mon, 31 Jul 2023 22:02:49 +0800 Subject: [PATCH 085/149] crypto: starfive - fix return value check in starfive_aes_prepare_req() kzalloc() returns NULL pointer not PTR_ERR() when it fails, so replace the IS_ERR() check with NULL pointer check. 
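The starfive fix reflects a general rule: kzalloc() and friends report failure with a NULL pointer, never with an ERR_PTR() value, so IS_ERR() on their result is always false. A minimal sketch of the corrected shape, with illustrative names only:

#include <linux/device.h>
#include <linux/slab.h>

static int demo_alloc_adata(struct device *dev, void **adata, size_t len)
{
	*adata = kzalloc(len, GFP_KERNEL);
	if (!*adata)	/* NULL on failure, never an ERR_PTR() */
		return dev_err_probe(dev, -ENOMEM,
				     "Failed to alloc memory for adata\n");

	return 0;
}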
Fixes: e22471c2331c ("crypto: starfive - Add AES skcipher and aead support") Signed-off-by: Yang Yingliang Signed-off-by: Herbert Xu --- drivers/crypto/starfive/jh7110-aes.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c index 04dd7958054f..278dfa4aa743 100644 --- a/drivers/crypto/starfive/jh7110-aes.c +++ b/drivers/crypto/starfive/jh7110-aes.c @@ -496,8 +496,8 @@ static int starfive_aes_prepare_req(struct skcipher_request *req, if (cryp->assoclen) { rctx->adata = kzalloc(ALIGN(cryp->assoclen, AES_BLOCK_SIZE), GFP_KERNEL); - if (IS_ERR(rctx->adata)) - return dev_err_probe(cryp->dev, PTR_ERR(rctx->adata), + if (!rctx->adata) + return dev_err_probe(cryp->dev, -ENOMEM, "Failed to alloc memory for adata"); scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0); From aec48805163338f8413118796c1dd035661b9140 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 31 Jul 2023 18:54:54 +0200 Subject: [PATCH 086/149] crypto: stm32 - Properly handle pm_runtime_get failing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If pm_runtime_get() (disguised as pm_runtime_resume_and_get()) fails, this means the clk wasn't prepared and enabled. Returning early in this case however is wrong as then the following resource frees are skipped and this is never catched up. So do all the cleanups but clk_disable_unprepare(). Also don't emit a warning, as stm32_hash_runtime_resume() already emitted one. Note that the return value of stm32_hash_remove() is mostly ignored by the device core. The only effect of returning zero instead of an error value is to suppress another warning in platform_remove(). So return 0 even if pm_runtime_resume_and_get() failed. Fixes: 8b4d566de6a5 ("crypto: stm32/hash - Add power management support") Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 88a186c3dd78..75d281edae2a 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -2121,9 +2121,7 @@ static int stm32_hash_remove(struct platform_device *pdev) if (!hdev) return -ENODEV; - ret = pm_runtime_resume_and_get(hdev->dev); - if (ret < 0) - return ret; + ret = pm_runtime_get_sync(hdev->dev); stm32_hash_unregister_algs(hdev); @@ -2139,7 +2137,8 @@ static int stm32_hash_remove(struct platform_device *pdev) pm_runtime_disable(hdev->dev); pm_runtime_put_noidle(hdev->dev); - clk_disable_unprepare(hdev->clk); + if (ret >= 0) + clk_disable_unprepare(hdev->clk); return 0; } From 3feec4ef9f99e2ca3c4614bc0c46e9c0ad337357 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 31 Jul 2023 18:54:55 +0200 Subject: [PATCH 087/149] crypto: stm32 - Drop if block with always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit stm32_hash_remove() is only called after stm32_hash_probe() succeeded. In this case platform_set_drvdata() was called with a non-NULL data patameter. The check for hdev being non-NULL can be dropped because hdev is never NULL (or something bad like memory corruption happened and then the check doesn't help any more either). 
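The reasoning behind dropping that check is the usual drvdata contract: .remove() is only called for a device whose .probe() succeeded, and a successful probe has already stored a valid pointer with platform_set_drvdata(). A rough sketch of the pairing, using a made-up driver rather than stm32-hash:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_dev {
	int state;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_dev *ddev;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ddev);	/* always set on success */
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	/* Only reached after a successful probe, so never NULL here. */
	struct demo_dev *ddev = platform_get_drvdata(pdev);

	/* ... tear down using ddev ... */
	(void)ddev;
	return 0;
}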
Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 75d281edae2a..b10243035584 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -2114,13 +2114,9 @@ err_reset: static int stm32_hash_remove(struct platform_device *pdev) { - struct stm32_hash_dev *hdev; + struct stm32_hash_dev *hdev = platform_get_drvdata(pdev); int ret; - hdev = platform_get_drvdata(pdev); - if (!hdev) - return -ENODEV; - ret = pm_runtime_get_sync(hdev->dev); stm32_hash_unregister_algs(hdev); From 7f1045c61876addde13b9aeff2d504aa9dabc3a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 31 Jul 2023 18:54:56 +0200 Subject: [PATCH 088/149] crypto: stm32 - Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is ignored (apart from emitting a warning) and this typically results in resource leaks. To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. Eventually after all drivers are converted, .remove_new() is renamed to .remove(). Trivially convert this driver from always returning zero in the remove callback to the void returning variant. Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index b10243035584..68c52eeaa6b1 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -2112,7 +2112,7 @@ err_reset: return ret; } -static int stm32_hash_remove(struct platform_device *pdev) +static void stm32_hash_remove(struct platform_device *pdev) { struct stm32_hash_dev *hdev = platform_get_drvdata(pdev); int ret; @@ -2135,8 +2135,6 @@ static int stm32_hash_remove(struct platform_device *pdev) if (ret >= 0) clk_disable_unprepare(hdev->clk); - - return 0; } #ifdef CONFIG_PM @@ -2173,7 +2171,7 @@ static const struct dev_pm_ops stm32_hash_pm_ops = { static struct platform_driver stm32_hash_driver = { .probe = stm32_hash_probe, - .remove = stm32_hash_remove, + .remove_new = stm32_hash_remove, .driver = { .name = "stm32-hash", .pm = &stm32_hash_pm_ops, From ac2d838fb7c479434513c8d4565a111fb805edaa Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 1 Aug 2023 19:11:46 +0900 Subject: [PATCH 089/149] crypto: arm64/aes - remove Makefile hack Do it more simiply. This also fixes single target builds. [before] $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- arch/arm64/crypto/aes-glue-ce.i [snip] make[4]: *** No rule to make target 'arch/arm64/crypto/aes-glue-ce.i'. Stop. 
[after] $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- arch/arm64/crypto/aes-glue-ce.i [snip] CPP arch/arm64/crypto/aes-glue-ce.i Signed-off-by: Masahiro Yamada Acked-by: Will Deacon Signed-off-by: Herbert Xu --- arch/arm64/crypto/Makefile | 5 ----- arch/arm64/crypto/aes-glue-ce.c | 2 ++ arch/arm64/crypto/aes-glue-neon.c | 1 + 3 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 arch/arm64/crypto/aes-glue-ce.c create mode 100644 arch/arm64/crypto/aes-glue-neon.c diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 4818e204c2ac..fbe64dce66e0 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -81,11 +81,6 @@ aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o -CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS - -$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE - $(call if_changed_rule,cc_o_c) - quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $(<) void $(@) diff --git a/arch/arm64/crypto/aes-glue-ce.c b/arch/arm64/crypto/aes-glue-ce.c new file mode 100644 index 000000000000..7d309ceeddf3 --- /dev/null +++ b/arch/arm64/crypto/aes-glue-ce.c @@ -0,0 +1,2 @@ +#define USE_V8_CRYPTO_EXTENSIONS +#include "aes-glue.c" diff --git a/arch/arm64/crypto/aes-glue-neon.c b/arch/arm64/crypto/aes-glue-neon.c new file mode 100644 index 000000000000..8ba046321064 --- /dev/null +++ b/arch/arm64/crypto/aes-glue-neon.c @@ -0,0 +1 @@ +#include "aes-glue.c" From 6b4b53ca0b7300ba2af98a49dbce22054bf034fe Mon Sep 17 00:00:00 2001 From: Frederick Lawler Date: Tue, 1 Aug 2023 08:57:09 -0500 Subject: [PATCH 090/149] crypto: af_alg - Decrement struct key.usage in alg_set_by_key_serial() Calls to lookup_user_key() require a corresponding key_put() to decrement the usage counter. Once it reaches zero, we schedule key GC. Therefore decrement struct key.usage in alg_set_by_key_serial(). Fixes: 7984ceb134bf ("crypto: af_alg - Support symmetric encryption via keyring keys") Cc: Signed-off-by: Frederick Lawler Signed-off-by: Herbert Xu --- crypto/af_alg.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 9ee8575d3b1a..e9969dc0e83c 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -320,18 +320,21 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval, if (IS_ERR(ret)) { up_read(&key->sem); + key_put(key); return PTR_ERR(ret); } key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL); if (!key_data) { up_read(&key->sem); + key_put(key); return -ENOMEM; } memcpy(key_data, ret, key_datalen); up_read(&key->sem); + key_put(key); err = type->setkey(ask->private, key_data, key_datalen); From dd8e82f4fa9f0ef18e4409a817609f794ea2d468 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Wed, 2 Aug 2023 17:14:27 +0800 Subject: [PATCH 091/149] crypto: qat - use kfree_sensitive instead of memset/kfree() Use kfree_sensitive() instead of memset() and kfree(). 
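kfree_sensitive() is the standard helper for buffers that held key material or other secrets: it zeroizes the allocation (via memzero_explicit()) before returning it to the allocator, which is what the open-coded memset()+kfree() pair was doing, without the risk of the memset being treated as a dead store. A hedged sketch with made-up names:

#include <linux/slab.h>
#include <linux/string.h>

static int demo_use_secret(const u8 *key, size_t len)
{
	u8 *tmp;

	tmp = kmemdup(key, len, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* ... feed tmp to the hardware ... */

	/* Zeroizes the buffer before freeing, so the secret does not
	 * linger in freed memory. */
	kfree_sensitive(tmp);
	return 0;
}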
Signed-off-by: Yang Yingliang Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/qat_compression.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c index 3f1f35283266..7842a9f22178 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_compression.c +++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c @@ -234,8 +234,7 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev) dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz, DMA_FROM_DEVICE); - memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz); - kfree(dc_data->ovf_buff); + kfree_sensitive(dc_data->ovf_buff); devm_kfree(dev, dc_data); accel_dev->dc_data = NULL; } From 7999b615fd18e2c2d1aa540b930341c494c0c3e7 Mon Sep 17 00:00:00 2001 From: Ruan Jinjie Date: Thu, 3 Aug 2023 17:29:33 +0800 Subject: [PATCH 092/149] crypto: hisilicon/sec - Do not check for 0 return after calling platform_get_irq() Since commit ce753ad1549c ("platform: finally disallow IRQ0 in platform_get_irq() and its ilk"), there is no possible for platform_get_irq() to return 0. Use the return value from platform_get_irq(). Signed-off-by: Ruan Jinjie Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec/sec_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index e75851326c1e..e1e08993de12 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -1107,8 +1107,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue) } queue->task_irq = platform_get_irq(to_platform_device(dev), queue->queue_id * 2 + 1); - if (queue->task_irq <= 0) { - ret = -EINVAL; + if (queue->task_irq < 0) { + ret = queue->task_irq; goto err_free_ring_db; } From 9ae4577bc077a7e32c3c7d442c95bc76865c0f17 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 3 Aug 2023 17:59:28 +0800 Subject: [PATCH 093/149] crypto: api - Use work queue in crypto_destroy_instance The function crypto_drop_spawn expects to be called in process context. However, when an instance is unregistered while it still has active users, the last user may cause the instance to be freed in atomic context. Fix this by delaying the freeing to a work queue. 
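The fix follows a common kernel pattern: when the last reference may be dropped in atomic context but teardown needs to sleep, embed a work_struct in the object, queue it from the atomic path, and do the real cleanup in the worker, recovering the object with container_of(). A rough, self-contained sketch (not the actual crypto_instance code):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct work_struct free_work;
	/* ... state whose teardown may sleep ... */
};

static void demo_free_workfn(struct work_struct *w)
{
	struct demo_obj *obj = container_of(w, struct demo_obj, free_work);

	/* Runs in process context, may sleep. */
	kfree(obj);
}

/* May be called from atomic context, e.g. on the final refcount drop. */
static void demo_destroy(struct demo_obj *obj)
{
	INIT_WORK(&obj->free_work, demo_free_workfn);
	schedule_work(&obj->free_work);
}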
Fixes: 6bfd48096ff8 ("[CRYPTO] api: Added spawns") Reported-by: Florent Revest Reported-by: syzbot+d769eed29cc42d75e2a3@syzkaller.appspotmail.com Reported-by: syzbot+610ec0671f51e838436e@syzkaller.appspotmail.com Signed-off-by: Herbert Xu Tested-by: Florent Revest Acked-by: Florent Revest Signed-off-by: Herbert Xu --- crypto/algapi.c | 16 ++++++++++++++-- include/crypto/algapi.h | 3 +++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 5e7cd603d489..4fe95c448047 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "internal.h" @@ -74,15 +75,26 @@ static void crypto_free_instance(struct crypto_instance *inst) inst->alg.cra_type->free(inst); } -static void crypto_destroy_instance(struct crypto_alg *alg) +static void crypto_destroy_instance_workfn(struct work_struct *w) { - struct crypto_instance *inst = (void *)alg; + struct crypto_instance *inst = container_of(w, struct crypto_instance, + free_work); struct crypto_template *tmpl = inst->tmpl; crypto_free_instance(inst); crypto_tmpl_put(tmpl); } +static void crypto_destroy_instance(struct crypto_alg *alg) +{ + struct crypto_instance *inst = container_of(alg, + struct crypto_instance, + alg); + + INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn); + schedule_work(&inst->free_work); +} + /* * This function adds a spawn to the list secondary_spawns which * will be used at the end of crypto_remove_spawns to unregister diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 6156161b181f..ca86f4c6ba43 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -12,6 +12,7 @@ #include #include #include +#include /* * Maximum values for blocksize and alignmask, used to allocate @@ -82,6 +83,8 @@ struct crypto_instance { struct crypto_spawn *spawns; }; + struct work_struct free_work; + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; From 2a598d0b2800aa23ba51adcf060cec524aaa63b2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 4 Aug 2023 17:24:34 +0800 Subject: [PATCH 094/149] crypto: lib - Move mpi into lib/crypto As lib/mpi is mostly used by crypto code, move it under lib/crypto so that patches touching it get directed to the right mailing list. 
Signed-off-by: Herbert Xu Reviewed-by: Mimi Zohar Signed-off-by: Herbert Xu --- lib/Makefile | 1 - lib/crypto/Makefile | 2 ++ lib/{ => crypto}/mpi/Makefile | 0 lib/{ => crypto}/mpi/ec.c | 0 lib/{ => crypto}/mpi/generic_mpih-add1.c | 0 lib/{ => crypto}/mpi/generic_mpih-lshift.c | 0 lib/{ => crypto}/mpi/generic_mpih-mul1.c | 0 lib/{ => crypto}/mpi/generic_mpih-mul2.c | 0 lib/{ => crypto}/mpi/generic_mpih-mul3.c | 0 lib/{ => crypto}/mpi/generic_mpih-rshift.c | 0 lib/{ => crypto}/mpi/generic_mpih-sub1.c | 0 lib/{ => crypto}/mpi/longlong.h | 0 lib/{ => crypto}/mpi/mpi-add.c | 0 lib/{ => crypto}/mpi/mpi-bit.c | 0 lib/{ => crypto}/mpi/mpi-cmp.c | 0 lib/{ => crypto}/mpi/mpi-div.c | 0 lib/{ => crypto}/mpi/mpi-inline.h | 0 lib/{ => crypto}/mpi/mpi-internal.h | 0 lib/{ => crypto}/mpi/mpi-inv.c | 0 lib/{ => crypto}/mpi/mpi-mod.c | 0 lib/{ => crypto}/mpi/mpi-mul.c | 0 lib/{ => crypto}/mpi/mpi-pow.c | 0 lib/{ => crypto}/mpi/mpi-sub-ui.c | 0 lib/{ => crypto}/mpi/mpicoder.c | 0 lib/{ => crypto}/mpi/mpih-cmp.c | 0 lib/{ => crypto}/mpi/mpih-div.c | 0 lib/{ => crypto}/mpi/mpih-mul.c | 0 lib/{ => crypto}/mpi/mpiutil.c | 0 28 files changed, 2 insertions(+), 1 deletion(-) rename lib/{ => crypto}/mpi/Makefile (100%) rename lib/{ => crypto}/mpi/ec.c (100%) rename lib/{ => crypto}/mpi/generic_mpih-add1.c (100%) rename lib/{ => crypto}/mpi/generic_mpih-lshift.c (100%) rename lib/{ => crypto}/mpi/generic_mpih-mul1.c (100%) rename lib/{ => crypto}/mpi/generic_mpih-mul2.c (100%) rename lib/{ => crypto}/mpi/generic_mpih-mul3.c (100%) rename lib/{ => crypto}/mpi/generic_mpih-rshift.c (100%) rename lib/{ => crypto}/mpi/generic_mpih-sub1.c (100%) rename lib/{ => crypto}/mpi/longlong.h (100%) rename lib/{ => crypto}/mpi/mpi-add.c (100%) rename lib/{ => crypto}/mpi/mpi-bit.c (100%) rename lib/{ => crypto}/mpi/mpi-cmp.c (100%) rename lib/{ => crypto}/mpi/mpi-div.c (100%) rename lib/{ => crypto}/mpi/mpi-inline.h (100%) rename lib/{ => crypto}/mpi/mpi-internal.h (100%) rename lib/{ => crypto}/mpi/mpi-inv.c (100%) rename lib/{ => crypto}/mpi/mpi-mod.c (100%) rename lib/{ => crypto}/mpi/mpi-mul.c (100%) rename lib/{ => crypto}/mpi/mpi-pow.c (100%) rename lib/{ => crypto}/mpi/mpi-sub-ui.c (100%) rename lib/{ => crypto}/mpi/mpicoder.c (100%) rename lib/{ => crypto}/mpi/mpih-cmp.c (100%) rename lib/{ => crypto}/mpi/mpih-div.c (100%) rename lib/{ => crypto}/mpi/mpih-mul.c (100%) rename lib/{ => crypto}/mpi/mpiutil.c (100%) diff --git a/lib/Makefile b/lib/Makefile index 42d307ade225..1f5235478259 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -253,7 +253,6 @@ obj-$(CONFIG_DQL) += dynamic_queue_limits.o obj-$(CONFIG_GLOB) += glob.o obj-$(CONFIG_GLOB_SELFTEST) += globtest.o -obj-$(CONFIG_MPILIB) += mpi/ obj-$(CONFIG_DIMLIB) += dim/ obj-$(CONFIG_SIGNATURE) += digsig.o diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 6ec2d4543d9c..8d1446c2be71 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -53,3 +53,5 @@ libblake2s-y += blake2s-selftest.o libchacha20poly1305-y += chacha20poly1305-selftest.o libcurve25519-y += curve25519-selftest.o endif + +obj-$(CONFIG_MPILIB) += mpi/ diff --git a/lib/mpi/Makefile b/lib/crypto/mpi/Makefile similarity index 100% rename from lib/mpi/Makefile rename to lib/crypto/mpi/Makefile diff --git a/lib/mpi/ec.c b/lib/crypto/mpi/ec.c similarity index 100% rename from lib/mpi/ec.c rename to lib/crypto/mpi/ec.c diff --git a/lib/mpi/generic_mpih-add1.c b/lib/crypto/mpi/generic_mpih-add1.c similarity index 100% rename from lib/mpi/generic_mpih-add1.c rename to lib/crypto/mpi/generic_mpih-add1.c 
diff --git a/lib/mpi/generic_mpih-lshift.c b/lib/crypto/mpi/generic_mpih-lshift.c similarity index 100% rename from lib/mpi/generic_mpih-lshift.c rename to lib/crypto/mpi/generic_mpih-lshift.c diff --git a/lib/mpi/generic_mpih-mul1.c b/lib/crypto/mpi/generic_mpih-mul1.c similarity index 100% rename from lib/mpi/generic_mpih-mul1.c rename to lib/crypto/mpi/generic_mpih-mul1.c diff --git a/lib/mpi/generic_mpih-mul2.c b/lib/crypto/mpi/generic_mpih-mul2.c similarity index 100% rename from lib/mpi/generic_mpih-mul2.c rename to lib/crypto/mpi/generic_mpih-mul2.c diff --git a/lib/mpi/generic_mpih-mul3.c b/lib/crypto/mpi/generic_mpih-mul3.c similarity index 100% rename from lib/mpi/generic_mpih-mul3.c rename to lib/crypto/mpi/generic_mpih-mul3.c diff --git a/lib/mpi/generic_mpih-rshift.c b/lib/crypto/mpi/generic_mpih-rshift.c similarity index 100% rename from lib/mpi/generic_mpih-rshift.c rename to lib/crypto/mpi/generic_mpih-rshift.c diff --git a/lib/mpi/generic_mpih-sub1.c b/lib/crypto/mpi/generic_mpih-sub1.c similarity index 100% rename from lib/mpi/generic_mpih-sub1.c rename to lib/crypto/mpi/generic_mpih-sub1.c diff --git a/lib/mpi/longlong.h b/lib/crypto/mpi/longlong.h similarity index 100% rename from lib/mpi/longlong.h rename to lib/crypto/mpi/longlong.h diff --git a/lib/mpi/mpi-add.c b/lib/crypto/mpi/mpi-add.c similarity index 100% rename from lib/mpi/mpi-add.c rename to lib/crypto/mpi/mpi-add.c diff --git a/lib/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c similarity index 100% rename from lib/mpi/mpi-bit.c rename to lib/crypto/mpi/mpi-bit.c diff --git a/lib/mpi/mpi-cmp.c b/lib/crypto/mpi/mpi-cmp.c similarity index 100% rename from lib/mpi/mpi-cmp.c rename to lib/crypto/mpi/mpi-cmp.c diff --git a/lib/mpi/mpi-div.c b/lib/crypto/mpi/mpi-div.c similarity index 100% rename from lib/mpi/mpi-div.c rename to lib/crypto/mpi/mpi-div.c diff --git a/lib/mpi/mpi-inline.h b/lib/crypto/mpi/mpi-inline.h similarity index 100% rename from lib/mpi/mpi-inline.h rename to lib/crypto/mpi/mpi-inline.h diff --git a/lib/mpi/mpi-internal.h b/lib/crypto/mpi/mpi-internal.h similarity index 100% rename from lib/mpi/mpi-internal.h rename to lib/crypto/mpi/mpi-internal.h diff --git a/lib/mpi/mpi-inv.c b/lib/crypto/mpi/mpi-inv.c similarity index 100% rename from lib/mpi/mpi-inv.c rename to lib/crypto/mpi/mpi-inv.c diff --git a/lib/mpi/mpi-mod.c b/lib/crypto/mpi/mpi-mod.c similarity index 100% rename from lib/mpi/mpi-mod.c rename to lib/crypto/mpi/mpi-mod.c diff --git a/lib/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c similarity index 100% rename from lib/mpi/mpi-mul.c rename to lib/crypto/mpi/mpi-mul.c diff --git a/lib/mpi/mpi-pow.c b/lib/crypto/mpi/mpi-pow.c similarity index 100% rename from lib/mpi/mpi-pow.c rename to lib/crypto/mpi/mpi-pow.c diff --git a/lib/mpi/mpi-sub-ui.c b/lib/crypto/mpi/mpi-sub-ui.c similarity index 100% rename from lib/mpi/mpi-sub-ui.c rename to lib/crypto/mpi/mpi-sub-ui.c diff --git a/lib/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c similarity index 100% rename from lib/mpi/mpicoder.c rename to lib/crypto/mpi/mpicoder.c diff --git a/lib/mpi/mpih-cmp.c b/lib/crypto/mpi/mpih-cmp.c similarity index 100% rename from lib/mpi/mpih-cmp.c rename to lib/crypto/mpi/mpih-cmp.c diff --git a/lib/mpi/mpih-div.c b/lib/crypto/mpi/mpih-div.c similarity index 100% rename from lib/mpi/mpih-div.c rename to lib/crypto/mpi/mpih-div.c diff --git a/lib/mpi/mpih-mul.c b/lib/crypto/mpi/mpih-mul.c similarity index 100% rename from lib/mpi/mpih-mul.c rename to lib/crypto/mpi/mpih-mul.c diff --git a/lib/mpi/mpiutil.c 
b/lib/crypto/mpi/mpiutil.c similarity index 100% rename from lib/mpi/mpiutil.c rename to lib/crypto/mpi/mpiutil.c From 9e47a758b70167c9301d2b44d2569f86c7796f2d Mon Sep 17 00:00:00 2001 From: Mark O'Donovan Date: Fri, 4 Aug 2023 09:32:18 +0000 Subject: [PATCH 095/149] crypto: lib/mpi - avoid null pointer deref in mpi_cmp_ui() During NVMeTCP Authentication a controller can trigger a kernel oops by specifying the 8192 bit Diffie Hellman group and passing a correctly sized, but zeroed Diffie Hellamn value. mpi_cmp_ui() was detecting this if the second parameter was 0, but 1 is passed from dh_is_pubkey_valid(). This causes the null pointer u->d to be dereferenced towards the end of mpi_cmp_ui() Signed-off-by: Mark O'Donovan Signed-off-by: Herbert Xu --- lib/crypto/mpi/mpi-cmp.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/crypto/mpi/mpi-cmp.c b/lib/crypto/mpi/mpi-cmp.c index c4cfa3ff0581..0835b6213235 100644 --- a/lib/crypto/mpi/mpi-cmp.c +++ b/lib/crypto/mpi/mpi-cmp.c @@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v) mpi_limb_t limb = v; mpi_normalize(u); - if (!u->nlimbs && !limb) - return 0; + if (u->nlimbs == 0) { + if (v == 0) + return 0; + else + return -1; + } if (u->sign) return -1; if (u->nlimbs > 1) From 91cb1e1432b3d873a0c8831d0dd8022db98ac8b8 Mon Sep 17 00:00:00 2001 From: Joachim Vandersmissen Date: Sun, 6 Aug 2023 14:19:03 -0500 Subject: [PATCH 096/149] crypto: jitter - Add clarifying comments to Jitter Entropy RCT cutoff values The RCT cutoff values are correct, but they don't exactly match the ones one would expect when computing them using the formula in SP800-90B. This discrepancy is due to the fact that the Jitter Entropy RCT starts at 1. To avoid any confusion by future reviewers, add some comments and explicitly subtract 1 from the "correct" cutoff values in the definitions. Signed-off-by: Joachim Vandersmissen Reviewed-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index c7d7f2caa779..fe9c233ec769 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -89,10 +89,14 @@ struct rand_data { unsigned int rct_count; /* Number of stuck values */ /* Intermittent health test failure threshold of 2^-30 */ -#define JENT_RCT_CUTOFF 30 /* Taken from SP800-90B sec 4.4.1 */ -#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */ + /* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */ + /* However, our RCT implementation starts at 1, so we subtract 1 here. */ +#define JENT_RCT_CUTOFF (31 - 1) /* Taken from SP800-90B sec 4.4.1 */ +#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */ /* Permanent health test failure threshold of 2^-60 */ -#define JENT_RCT_CUTOFF_PERMANENT 60 + /* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */ + /* However, our RCT implementation starts at 1, so we subtract 1 here. 
*/ +#define JENT_RCT_CUTOFF_PERMANENT (61 - 1) #define JENT_APT_CUTOFF_PERMANENT 355 #define JENT_APT_WINDOW_SIZE 512 /* Data window size */ /* LSB of time stamp to process */ From b52c8c72dd7c2c5695bc86a31d48dd8da4127d94 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 7 Aug 2023 13:16:43 +0200 Subject: [PATCH 097/149] crypto: caam - fix PM operations definition The newly added PM operations use the deprecated SIMPLE_DEV_PM_OPS() macro, causing a warning in some configurations: drivers/crypto/caam/ctrl.c:828:12: error: 'caam_ctrl_resume' defined but not used [-Werror=unused-function] 828 | static int caam_ctrl_resume(struct device *dev) | ^~~~~~~~~~~~~~~~ drivers/crypto/caam/ctrl.c:818:12: error: 'caam_ctrl_suspend' defined but not used [-Werror=unused-function] 818 | static int caam_ctrl_suspend(struct device *dev) | ^~~~~~~~~~~~~~~~~ drivers/crypto/caam/jr.c:732:12: error: 'caam_jr_resume' defined but not used [-Werror=unused-function] 732 | static int caam_jr_resume(struct device *dev) | ^~~~~~~~~~~~~~ drivers/crypto/caam/jr.c:687:12: error: 'caam_jr_suspend' defined but not used [-Werror=unused-function] 687 | static int caam_jr_suspend(struct device *dev) | ^~~~~~~~~~~~~~~ Use the normal DEFINE_SIMPLE_DEV_PM_OPS() variant now, and use pm_ptr() to completely eliminate the structure in configs without CONFIG_PM. Fixes: 322d74752c28a ("crypto: caam - add power management support") Signed-off-by: Arnd Bergmann Reviewed-by: Meenakshi Aggarwal Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 4 ++-- drivers/crypto/caam/jr.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index a7a4583107f4..2a228a36fa15 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -841,7 +841,7 @@ static int caam_ctrl_resume(struct device *dev) return ret; } -static SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume); /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) @@ -1138,7 +1138,7 @@ static struct platform_driver caam_driver = { .driver = { .name = "caam", .of_match_table = caam_match, - .pm = &caam_ctrl_pm_ops, + .pm = pm_ptr(&caam_ctrl_pm_ops), }, .probe = caam_probe, }; diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 316180d26f8a..767fbf052536 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -794,7 +794,7 @@ add_jr: return 0; } -static SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume); static const struct of_device_id caam_jr_match[] = { { @@ -811,7 +811,7 @@ static struct platform_driver caam_jr_driver = { .driver = { .name = "caam_jr", .of_match_table = caam_jr_match, - .pm = &caam_jr_pm_ops, + .pm = pm_ptr(&caam_jr_pm_ops), }, .probe = caam_jr_probe, .remove = caam_jr_remove, From e30685204711a6be40dec2622606950ccd37dafe Mon Sep 17 00:00:00 2001 From: Gaurav Jain Date: Tue, 8 Aug 2023 12:55:25 +0200 Subject: [PATCH 098/149] crypto: caam - fix unchecked return value error error: Unchecked return value (CHECKED_RETURN) check_return: Calling sg_miter_next without checking return value fix: added check if(!sg_miter_next) Fixes: 8a2a0dd35f2e ("crypto: caam - strip input zeros from RSA input buffer") Signed-off-by: Gaurav Jain Signed-off-by: Meenakshi Aggarwal Reviewed-by: Gaurav Jain 
Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 72afc249d42f..7e08af751e4e 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -225,7 +225,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, if (len && *buff) break; - sg_miter_next(&miter); + if (!sg_miter_next(&miter)) + break; + buff = miter.addr; len = miter.length; From e47e6d2aaacd30e93b97f20d4a20556cefd18406 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Tue, 8 Aug 2023 12:55:26 +0200 Subject: [PATCH 099/149] crypto: caam - increase the domain of write memory barrier to full system In caam_jr_enqueue, under heavy DDR load, smp_wmb() or dma_wmb() fail to make the input ring be updated before the CAAM starts reading it. So, CAAM will process, again, an old descriptor address and will put it in the output ring. This will make caam_jr_dequeue() to fail, since this old descriptor is not in the software ring. To fix this, use wmb() which works on the full system instead of inner/outer shareable domains. Signed-off-by: Iuliana Prodan Signed-off-by: Meenakshi Aggarwal Reviewed-by: Gaurav Jain Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 767fbf052536..5507d5d34a4c 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -464,8 +464,16 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, * Guarantee that the descriptor's DMA address has been written to * the next slot in the ring before the write index is updated, since * other cores may update this index independently. + * + * Under heavy DDR load, smp_wmb() or dma_wmb() fail to make the input + * ring be updated before the CAAM starts reading it. So, CAAM will + * process, again, an old descriptor address and will put it in the + * output ring. This will make caam_jr_dequeue() to fail, since this + * old descriptor is not in the software ring. + * To fix this, use wmb() which works on the full system instead of + * inner/outer shareable domains. */ - smp_wmb(); + wmb(); jrp->head = (head + 1) & (JOBR_DEPTH - 1); From 23d422a4f127901b2e58d925f130d4ce534a662a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Tue, 8 Aug 2023 12:55:27 +0200 Subject: [PATCH 100/149] crypto: caam/jr - fix shared IRQ line handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are cases when the interrupt status register (JRINTR) is non-zero, even though: 1. An interrupt was generated, but it was masked OR 2. There was no interrupt generated at all for the corresponding job ring. 1. The case when interrupt is masked (JRCFGR_LS[IMSK]=1b'1) while other events have happened and are being accounted for, e.g. -JRINTR[HALT]=2b'10 - input job ring underwent a flush of all on-going jobs and processing of still-existing jobs (sitting in the ring) has been halted -JRINTR[HALT]=2b'01 - input job ring is currently undergoing a flush -JRINTR[ENTER_FAIL]=1b'1 - SecMon / SNVS transitioned to FAIL MODE It doesn't matter whether these events would assert the interrupt signal or not, interrupt is anyhow masked. 2. 
The case when interrupt is not masked (JRCFGR_LS[IMSK]=1b'0), however the events accounted for in JRINTR do not generate interrupts, e.g.: -JRINTR[HALT]=2b'01 -JRINTR[ENTER_FAIL]=1b'1 and JRCFGR_MS[FAIL_MODE]=1b'0 Currently in these cases, when the JR interrupt handler is invoked (as a consequence of JR sharing the interrupt line with other devices - e.g. the two JRs on i.MX7ULP) it continues execution instead of returning IRQ_NONE. This could lead to situations like interrupt handler clearing JRINTR (and thus also the JRINTR[HALT] field) while corresponding job ring is suspended and then that job ring failing on resume path, due to expecting JRINTR[HALT]=b'10 and reading instead JRINTR[HALT]=b'00. Fix this by checking status of JRINTR[JRI] in the JR interrupt handler. If JRINTR[JRI]=1b'0, there was no interrupt generated for this JR and handler must return IRQ_NONE. Signed-off-by: Horia Geantă Signed-off-by: Meenakshi Aggarwal Reviewed-by: Gaurav Jain Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 5507d5d34a4c..07ec2f27cc68 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -232,7 +232,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) * tasklet if jobs done. */ irqstate = rd_reg32(&jrp->rregs->jrintstatus); - if (!irqstate) + if (!(irqstate & JRINT_JR_INT)) return IRQ_NONE; /* From 64dd341e66f45cdde95f81788d0b13636847fcc3 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Wed, 9 Aug 2023 11:14:43 +0800 Subject: [PATCH 101/149] crypto: allwinner - Remove unused function declarations Commit 06f751b61329 ("crypto: allwinner - Add sun8i-ce Crypto Engine") declared but never implemented sun8i_ce_enqueue(). Commit 56f6d5aee88d ("crypto: sun8i-ce - support hash algorithms") declared but never implemented sun8i_ce_hash(). Commit f08fcced6d00 ("crypto: allwinner - Add sun8i-ss cryptographic offloader") declared but never implemented sun8i_ss_enqueue(). 
Signed-off-by: Yue Haibing Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 3 --- drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 2 -- 2 files changed, 5 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 27029fb77e29..4742b48ef52e 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -347,8 +347,6 @@ struct sun8i_ce_alg_template { char fbname[CRYPTO_MAX_ALG_NAME]; }; -int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type); - int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, @@ -367,7 +365,6 @@ void sun8i_ce_hash_craexit(struct crypto_tfm *tfm); int sun8i_ce_hash_init(struct ahash_request *areq); int sun8i_ce_hash_export(struct ahash_request *areq, void *out); int sun8i_ce_hash_import(struct ahash_request *areq, const void *in); -int sun8i_ce_hash(struct ahash_request *areq); int sun8i_ce_hash_final(struct ahash_request *areq); int sun8i_ce_hash_update(struct ahash_request *areq); int sun8i_ce_hash_finup(struct ahash_request *areq); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h index df6f08f6092f..bfe305261e9a 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h @@ -293,8 +293,6 @@ struct sun8i_ss_alg_template { char fbname[CRYPTO_MAX_ALG_NAME]; }; -int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type); - int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, From d94e0f25deefda953ac348a244189cf358b8bd3e Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Wed, 9 Aug 2023 11:16:14 +0800 Subject: [PATCH 102/149] crypto: qat - Remove unused function declarations Commit d8cba25d2c68 ("crypto: qat - Intel(R) QAT driver framework") declared but never implemented these functions. 
Signed-off-by: Yue Haibing Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 799a2193d3e5..673b5044c62a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -58,12 +58,6 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); - -int adf_ctl_dev_register(void); -void adf_ctl_dev_unregister(void); -int adf_processes_dev_register(void); -void adf_processes_dev_unregister(void); - int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, @@ -182,8 +176,6 @@ int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle, int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned long ctx_mask, unsigned short reg_num, unsigned int regdata); -int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, - unsigned char ae, unsigned short lm_addr, unsigned int value); void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned char mode); int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); From d1c02e876f2e63520fd4fe393ed6b68882c9f848 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Thu, 10 Aug 2023 12:00:23 +0200 Subject: [PATCH 103/149] crypto: exynos - fix Wvoid-pointer-to-enum-cast warning 'type' is an enum, thus cast of pointer on 64-bit compile test with W=1 causes: exynos-rng.c:280:14: error: cast to smaller integer type 'enum exynos_prng_type' from 'const void *' [-Werror,-Wvoid-pointer-to-enum-cast] Signed-off-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/exynos-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c index cbd8ca6e52ee..b1df66be9adc 100644 --- a/drivers/crypto/exynos-rng.c +++ b/drivers/crypto/exynos-rng.c @@ -277,7 +277,7 @@ static int exynos_rng_probe(struct platform_device *pdev) if (!rng) return -ENOMEM; - rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev); + rng->type = (uintptr_t)of_device_get_match_data(&pdev->dev); mutex_init(&rng->lock); From d88bdbd96d258ab7973e67b94701c55324e9885f Mon Sep 17 00:00:00 2001 From: GUO Zihua Date: Thu, 10 Aug 2023 21:00:43 +0800 Subject: [PATCH 104/149] hwrng: core - Remove duplicated include Remove duplicated include of linux/random.h. Resolves checkincludes message. And adjust includes in alphabetical order. 
Signed-off-by: GUO Zihua Signed-off-by: Herbert Xu --- drivers/char/hw_random/core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index f34d356fe2c0..e3598ec9cfca 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -15,14 +15,13 @@ #include #include #include -#include #include #include -#include #include #include #include #include +#include #include #include From 8e03dd62e5be811efbf0cbeba47e79e793519105 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 10 Aug 2023 12:22:08 -0700 Subject: [PATCH 105/149] hwrng: iproc-rng200 - Implement suspend and resume calls Chips such as BCM7278 support system wide suspend/resume which will cause the HWRNG block to lose its state and reset to its power on reset register values. We need to cleanup and re-initialize the HWRNG for it to be functional coming out of a system suspend cycle. Fixes: c3577f6100ca ("hwrng: iproc-rng200 - Add support for BCM7278") Signed-off-by: Florian Fainelli Signed-off-by: Herbert Xu --- drivers/char/hw_random/iproc-rng200.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c index 34df3f0d3e45..440fe28bddc0 100644 --- a/drivers/char/hw_random/iproc-rng200.c +++ b/drivers/char/hw_random/iproc-rng200.c @@ -181,6 +181,8 @@ static int iproc_rng200_probe(struct platform_device *pdev) return PTR_ERR(priv->base); } + dev_set_drvdata(dev, priv); + priv->rng.name = "iproc-rng200"; priv->rng.read = iproc_rng200_read; priv->rng.init = iproc_rng200_init; @@ -198,6 +200,28 @@ static int iproc_rng200_probe(struct platform_device *pdev) return 0; } +static int __maybe_unused iproc_rng200_suspend(struct device *dev) +{ + struct iproc_rng200_dev *priv = dev_get_drvdata(dev); + + iproc_rng200_cleanup(&priv->rng); + + return 0; +} + +static int __maybe_unused iproc_rng200_resume(struct device *dev) +{ + struct iproc_rng200_dev *priv = dev_get_drvdata(dev); + + iproc_rng200_init(&priv->rng); + + return 0; +} + +static const struct dev_pm_ops iproc_rng200_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume) +}; + static const struct of_device_id iproc_rng200_of_match[] = { { .compatible = "brcm,bcm2711-rng200", }, { .compatible = "brcm,bcm7211-rng200", }, @@ -211,6 +235,7 @@ static struct platform_driver iproc_rng200_driver = { .driver = { .name = "iproc-rng200", .of_match_table = iproc_rng200_of_match, + .pm = &iproc_rng200_pm_ops, }, .probe = iproc_rng200_probe, }; From f9fc1ec28baeeef6259127b3cbeb34986834574f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 11 Aug 2023 15:46:33 +0200 Subject: [PATCH 106/149] crypto: drivers - avoid memcpy size warning Some configurations with gcc-12 or gcc-13 produce a warning for the source and destination of a memcpy() in atmel_sha_hmac_compute_ipad_hash() potentially overlapping: In file included from include/linux/string.h:254, from drivers/crypto/atmel-sha.c:15: drivers/crypto/atmel-sha.c: In function 'atmel_sha_hmac_compute_ipad_hash': include/linux/fortify-string.h:57:33: error: '__builtin_memcpy' accessing 129 or more bytes at offsets 408 and 280 overlaps 1 or more bytes at offset 408 [-Werror=restrict] 57 | #define __underlying_memcpy __builtin_memcpy | ^ include/linux/fortify-string.h:648:9: note: in expansion of macro '__underlying_memcpy' 648 | __underlying_##op(p, q, __fortify_size); \ | ^~~~~~~~~~~~~ include/linux/fortify-string.h:693:26: 
note: in expansion of macro '__fortify_memcpy_chk' 693 | #define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \ | ^~~~~~~~~~~~~~~~~~~~ drivers/crypto/atmel-sha.c:1773:9: note: in expansion of macro 'memcpy' 1773 | memcpy(hmac->opad, hmac->ipad, bs); | ^~~~~~ The same thing happens in two more drivers that have the same logic: drivers/crypto/chelsio/chcr_algo.c: In function 'chcr_ahash_setkey': include/linux/fortify-string.h:57:33: error: '__builtin_memcpy' accessing 129 or more bytes at offsets 260 and 132 overlaps 1 or more bytes at offset 260 [-Werror=restrict] drivers/crypto/bcm/cipher.c: In function 'ahash_hmac_setkey': include/linux/fortify-string.h:57:33: error: '__builtin_memcpy' accessing between 129 and 4294967295 bytes at offsets 840 and 712 overlaps between 1 and 4294967167 bytes at offset 840 [-Werror=restrict] I don't think it can actually happen because the size is strictly bounded to the available block sizes, at most 128 bytes, though inlining decisions could lead gcc to not see that. Use the unsafe_memcpy() helper instead of memcpy(), with the only difference being that this skips the hardening checks that produce the warning. Suggested-by: Herbert Xu Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha.c | 3 ++- drivers/crypto/bcm/cipher.c | 3 ++- drivers/crypto/chelsio/chcr_algo.c | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 54fec72dfba2..99a9ff8e743f 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -1770,7 +1770,8 @@ static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd) size_t bs = ctx->block_size; size_t i, num_words = bs / sizeof(u32); - memcpy(hmac->opad, hmac->ipad, bs); + unsafe_memcpy(hmac->opad, hmac->ipad, bs, + "fortified memcpy causes -Wrestrict warning"); for (i = 0; i < num_words; ++i) { hmac->ipad[i] ^= 0x36363636; hmac->opad[i] ^= 0x5c5c5c5c; diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 70b911baab26..4c46357e2570 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -2397,7 +2397,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, memset(ctx->ipad + ctx->authkeylen, 0, blocksize - ctx->authkeylen); ctx->authkeylen = 0; - memcpy(ctx->opad, ctx->ipad, blocksize); + unsafe_memcpy(ctx->opad, ctx->ipad, blocksize, + "fortified memcpy causes -Wrestrict warning"); for (index = 0; index < blocksize; index++) { ctx->ipad[index] ^= HMAC_IPAD_VALUE; diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 0eade4fa6695..16298ae4a00b 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -2216,7 +2216,8 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, memcpy(hmacctx->ipad, key, keylen); } memset(hmacctx->ipad + keylen, 0, bs - keylen); - memcpy(hmacctx->opad, hmacctx->ipad, bs); + unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs, + "fortified memcpy causes -Wrestrict warning"); for (i = 0; i < bs / sizeof(int); i++) { *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; From b9296bb41275ef2173d2c8b144b808f5e6d18bbe Mon Sep 17 00:00:00 2001 From: Konrad Dybcio Date: Fri, 11 Aug 2023 22:50:56 +0200 Subject: [PATCH 107/149] dt-bindings: crypto: qcom,prng: Add SM8450 SM8450's PRNG does not require a core clock reference. Add a new compatible with a qcom,prng-ee fallback and handle that. 
Signed-off-by: Konrad Dybcio Acked-by: Conor Dooley Signed-off-by: Herbert Xu --- .../devicetree/bindings/crypto/qcom,prng.yaml | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml index bb42f4588b40..36b0ebd9a44b 100644 --- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml @@ -11,9 +11,13 @@ maintainers: properties: compatible: - enum: - - qcom,prng # 8916 etc. - - qcom,prng-ee # 8996 and later using EE + oneOf: + - enum: + - qcom,prng # 8916 etc. + - qcom,prng-ee # 8996 and later using EE + - items: + - const: qcom,sm8450-prng-ee + - const: qcom,prng-ee reg: maxItems: 1 @@ -28,8 +32,18 @@ properties: required: - compatible - reg - - clocks - - clock-names + +allOf: + - if: + not: + properties: + compatible: + contains: + const: qcom,sm8450-prng-ee + then: + required: + - clocks + - clock-names additionalProperties: false From cdb8b7e1692a81f26030050ec30aadd2e7538fef Mon Sep 17 00:00:00 2001 From: Konrad Dybcio Date: Fri, 11 Aug 2023 22:50:57 +0200 Subject: [PATCH 108/149] crypto: qcom-rng: Make the core clock optional regardless of ACPI presence Some newer SoCs (like SM8450) do not require a clock vote for the PRNG to function. Make it entirely optional and rely on the bindings checker to ensure platforms that need it, consume one. Signed-off-by: Konrad Dybcio Reviewed-by: Bjorn Andersson Signed-off-by: Herbert Xu --- drivers/crypto/qcom-rng.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 72dd1a4ebac4..825a729f205e 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -173,13 +173,9 @@ static int qcom_rng_probe(struct platform_device *pdev) if (IS_ERR(rng->base)) return PTR_ERR(rng->base); - /* ACPI systems have clk already on, so skip clk_get */ - if (!has_acpi_companion(&pdev->dev)) { - rng->clk = devm_clk_get(&pdev->dev, "core"); - if (IS_ERR(rng->clk)) - return PTR_ERR(rng->clk); - } - + rng->clk = devm_clk_get_optional(&pdev->dev, "core"); + if (IS_ERR(rng->clk)) + return PTR_ERR(rng->clk); rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev); From 4136212ab18eb3dce6efb6e18108765c36708f71 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:07 +0800 Subject: [PATCH 109/149] crypto: sun8i-ce - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. 
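As a rough sketch of the pattern applied throughout this series (the my_drv_* names below are made up for illustration and are not the actual sun8i-ce symbols), the work formerly split across the ->prepare_request and ->unprepare_request callbacks is simply invoked from the single ->do_one_request handler:

    static int my_drv_do_one_request(struct crypto_engine *engine, void *areq)
    {
            int err;

            /* work formerly done by the ->prepare_request callback */
            err = my_drv_prepare(engine, areq);
            if (err)
                    return err;

            my_drv_run_hw(engine, areq);

            /* work formerly done by the ->unprepare_request callback */
            my_drv_unprepare(engine, areq);
            return 0;
    }

    /* at tfm init time only one callback is registered now */
    op->enginectx.op.do_one_request = my_drv_do_one_request;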
Signed-off-by: Herbert Xu --- .../allwinner/sun8i-ce/sun8i-ce-cipher.c | 20 +++++++++++++------ .../crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 2 -- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index c13550090785..573a08fa7afa 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -294,7 +294,7 @@ theend: return err; } -static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) +static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) { struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq); @@ -308,10 +308,10 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); local_bh_enable(); - return 0; } -static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req) +static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine, + void *async_req) { struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); @@ -353,7 +353,17 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r } dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); +} +static int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq) +{ + int err = sun8i_ce_cipher_prepare(engine, areq); + + if (err) + return err; + + sun8i_ce_cipher_run(engine, areq); + sun8i_ce_cipher_unprepare(engine, areq); return 0; } @@ -423,9 +433,7 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), CRYPTO_MAX_ALG_NAME); - op->enginectx.op.do_one_request = sun8i_ce_cipher_run; - op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare; - op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare; + op->enginectx.op.do_one_request = sun8i_ce_cipher_do_one; err = pm_runtime_get_sync(op->ce->dev); if (err < 0) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 930ad157004c..04d7d890de58 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -32,8 +32,6 @@ int sun8i_ce_hash_crainit(struct crypto_tfm *tfm) op->ce = algt->ce; op->enginectx.op.do_one_request = sun8i_ce_hash_run; - op->enginectx.op.prepare_request = NULL; - op->enginectx.op.unprepare_request = NULL; /* FALLBACK */ op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0, From ff0800af25011c0dc0563f9cc72bb2a9158d12c2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:09 +0800 Subject: [PATCH 110/149] crypto: sun8i-ss - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. 
Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2 -- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 381a90fbeaff..2758dadf74eb 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -409,8 +409,6 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) CRYPTO_MAX_ALG_NAME); op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request; - op->enginectx.op.prepare_request = NULL; - op->enginectx.op.unprepare_request = NULL; err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) { diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index a4b67d130d11..e6865b49b1df 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -120,8 +120,6 @@ int sun8i_ss_hash_crainit(struct crypto_tfm *tfm) op->ss = algt->ss; op->enginectx.op.do_one_request = sun8i_ss_hash_run; - op->enginectx.op.prepare_request = NULL; - op->enginectx.op.unprepare_request = NULL; /* FALLBACK */ op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0, From db9f49e802d9f439cceaaf66ede841b32346e10e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:11 +0800 Subject: [PATCH 111/149] crypto: amlogic - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. Signed-off-by: Herbert Xu --- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index af017a087ebf..03b6586b71ef 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -332,8 +332,6 @@ int meson_cipher_init(struct crypto_tfm *tfm) crypto_skcipher_reqsize(op->fallback_tfm); op->enginectx.op.do_one_request = meson_handle_cipher_request; - op->enginectx.op.prepare_request = NULL; - op->enginectx.op.unprepare_request = NULL; return 0; } From 13bba5b505695eec89fde5bd68a12b83f71adc33 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:13 +0800 Subject: [PATCH 112/149] crypto: aspeed - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. 
Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-acry.c | 2 -- drivers/crypto/aspeed/aspeed-hace-crypto.c | 2 -- drivers/crypto/aspeed/aspeed-hace-hash.c | 14 ++++++++------ 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c index 470122c87fea..5ae529ce6806 100644 --- a/drivers/crypto/aspeed/aspeed-acry.c +++ b/drivers/crypto/aspeed/aspeed-acry.c @@ -590,8 +590,6 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm) } ctx->enginectx.op.do_one_request = aspeed_acry_do_request; - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.unprepare_request = NULL; return 0; } diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c index ef73b0028b4d..8d6d9ecb3a28 100644 --- a/drivers/crypto/aspeed/aspeed-hace-crypto.c +++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c @@ -714,8 +714,6 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm) crypto_skcipher_reqsize(ctx->fallback_tfm)); ctx->enginectx.op.do_one_request = aspeed_crypto_do_request; - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.unprepare_request = NULL; return 0; } diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c index 935135229ebd..f8c96568142e 100644 --- a/drivers/crypto/aspeed/aspeed-hace-hash.c +++ b/drivers/crypto/aspeed/aspeed-hace-hash.c @@ -565,8 +565,8 @@ static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq) return 0; } -static int aspeed_ahash_prepare_request(struct crypto_engine *engine, - void *areq) +static void aspeed_ahash_prepare_request(struct crypto_engine *engine, + void *areq) { struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -581,8 +581,12 @@ static int aspeed_ahash_prepare_request(struct crypto_engine *engine, hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg; else hash_engine->dma_prepare = aspeed_ahash_dma_prepare; +} - return 0; +static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq) +{ + aspeed_ahash_prepare_request(engine, areq); + return aspeed_ahash_do_request(engine, areq); } static int aspeed_sham_update(struct ahash_request *req) @@ -876,9 +880,7 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm) } } - tctx->enginectx.op.do_one_request = aspeed_ahash_do_request; - tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request; - tctx->enginectx.op.unprepare_request = NULL; + tctx->enginectx.op.do_one_request = aspeed_ahash_do_one; return 0; } From 0a3fa126578eb69eda563ac4f95955a8bb1715ed Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:15 +0800 Subject: [PATCH 113/149] crypto: sl3516 - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. 
Signed-off-by: Herbert Xu --- drivers/crypto/gemini/sl3516-ce-cipher.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c index 14d0d83d388d..0232f847785a 100644 --- a/drivers/crypto/gemini/sl3516-ce-cipher.c +++ b/drivers/crypto/gemini/sl3516-ce-cipher.c @@ -336,8 +336,6 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm) crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request; - op->enginectx.op.prepare_request = NULL; - op->enginectx.op.unprepare_request = NULL; err = pm_runtime_get_sync(op->ce->dev); if (err < 0) From 08d81da7a848a7aedb971d5d7d1ee9e373d3ecec Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:17 +0800 Subject: [PATCH 114/149] crypto: keembay - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. Signed-off-by: Herbert Xu --- drivers/crypto/intel/keembay/keembay-ocs-aes-core.c | 4 ---- drivers/crypto/intel/keembay/keembay-ocs-ecc.c | 2 -- 2 files changed, 6 deletions(-) diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c index ae31be00357a..f94f48289683 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c @@ -1150,9 +1150,7 @@ static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req) static inline int ocs_common_init(struct ocs_aes_tctx *tctx) { - tctx->engine_ctx.op.prepare_request = NULL; tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request; - tctx->engine_ctx.op.unprepare_request = NULL; return 0; } @@ -1208,9 +1206,7 @@ static void ocs_exit_tfm(struct crypto_skcipher *tfm) static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx) { - tctx->engine_ctx.op.prepare_request = NULL; tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request; - tctx->engine_ctx.op.unprepare_request = NULL; return 0; } diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c index 2269df17514c..e91e570b7ae0 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c @@ -794,9 +794,7 @@ static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id) if (!tctx->curve) return -EOPNOTSUPP; - tctx->engine_ctx.op.prepare_request = NULL; tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request; - tctx->engine_ctx.op.unprepare_request = NULL; return 0; } From c752c01389673b76c479d53cdd8f37558a0e406b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:19 +0800 Subject: [PATCH 115/149] crypto: omap - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. 
Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes-gcm.c | 14 +++++++------- drivers/crypto/omap-aes.c | 19 ++++--------------- drivers/crypto/omap-des.c | 19 ++++--------------- drivers/crypto/omap-sham.c | 6 ++++-- 4 files changed, 19 insertions(+), 39 deletions(-) diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c index 9f937bdc53a7..d02363e976e7 100644 --- a/drivers/crypto/omap-aes-gcm.c +++ b/drivers/crypto/omap-aes-gcm.c @@ -212,12 +212,10 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd, return 0; } -static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq) +static int omap_aes_gcm_prepare_req(struct aead_request *req, + struct omap_aes_dev *dd) { - struct aead_request *req = container_of(areq, struct aead_request, - base); struct omap_aes_reqctx *rctx = aead_request_ctx(req); - struct omap_aes_dev *dd = rctx->dd; struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); int err; @@ -362,11 +360,15 @@ static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq) base); struct omap_aes_reqctx *rctx = aead_request_ctx(req); struct omap_aes_dev *dd = rctx->dd; - int ret = 0; + int ret; if (!dd) return -ENODEV; + ret = omap_aes_gcm_prepare_req(req, dd); + if (ret) + return ret; + if (dd->in_sg_len) ret = omap_aes_crypt_dma_start(dd); else @@ -379,8 +381,6 @@ int omap_aes_gcm_cra_init(struct crypto_aead *tfm) { struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); - ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req; - ctx->enginectx.op.unprepare_request = NULL; ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req; crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx)); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 67a99c760bc4..d6fb8676f6cc 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -426,20 +426,15 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, return 0; } -static int omap_aes_prepare_req(struct crypto_engine *engine, - void *areq) +static int omap_aes_prepare_req(struct skcipher_request *req, + struct omap_aes_dev *dd) { - struct skcipher_request *req = container_of(areq, struct skcipher_request, base); struct omap_aes_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); struct omap_aes_reqctx *rctx = skcipher_request_ctx(req); - struct omap_aes_dev *dd = rctx->dd; int ret; u16 flags; - if (!dd) - return -ENODEV; - /* assign new request to device */ dd->req = req; dd->total = req->cryptlen; @@ -491,7 +486,8 @@ static int omap_aes_crypt_req(struct crypto_engine *engine, if (!dd) return -ENODEV; - return omap_aes_crypt_dma_start(dd); + return omap_aes_prepare_req(req, dd) ?: + omap_aes_crypt_dma_start(dd); } static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf) @@ -629,11 +625,6 @@ static int omap_aes_ctr_decrypt(struct skcipher_request *req) return omap_aes_crypt(req, FLAGS_CTR); } -static int omap_aes_prepare_req(struct crypto_engine *engine, - void *req); -static int omap_aes_crypt_req(struct crypto_engine *engine, - void *req); - static int omap_aes_init_tfm(struct crypto_skcipher *tfm) { const char *name = crypto_tfm_alg_name(&tfm->base); @@ -649,8 +640,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) + crypto_skcipher_reqsize(blk)); - ctx->enginectx.op.prepare_request = omap_aes_prepare_req; - ctx->enginectx.op.unprepare_request = NULL; ctx->enginectx.op.do_one_request = omap_aes_crypt_req; return 0; diff 
--git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 371a51094e34..29a3b4c9edaf 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -522,20 +522,15 @@ static int omap_des_handle_queue(struct omap_des_dev *dd, return 0; } -static int omap_des_prepare_req(struct crypto_engine *engine, - void *areq) +static int omap_des_prepare_req(struct skcipher_request *req, + struct omap_des_dev *dd) { - struct skcipher_request *req = container_of(areq, struct skcipher_request, base); struct omap_des_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); - struct omap_des_dev *dd = omap_des_find_dev(ctx); struct omap_des_reqctx *rctx; int ret; u16 flags; - if (!dd) - return -ENODEV; - /* assign new request to device */ dd->req = req; dd->total = req->cryptlen; @@ -590,7 +585,8 @@ static int omap_des_crypt_req(struct crypto_engine *engine, if (!dd) return -ENODEV; - return omap_des_crypt_dma_start(dd); + return omap_des_prepare_req(req, dd) ?: + omap_des_crypt_dma_start(dd); } static void omap_des_done_task(unsigned long data) @@ -709,11 +705,6 @@ static int omap_des_cbc_decrypt(struct skcipher_request *req) return omap_des_crypt(req, FLAGS_CBC); } -static int omap_des_prepare_req(struct crypto_engine *engine, - void *areq); -static int omap_des_crypt_req(struct crypto_engine *engine, - void *areq); - static int omap_des_init_tfm(struct crypto_skcipher *tfm) { struct omap_des_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -722,8 +713,6 @@ static int omap_des_init_tfm(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_des_reqctx)); - ctx->enginectx.op.prepare_request = omap_des_prepare_req; - ctx->enginectx.op.unprepare_request = NULL; ctx->enginectx.op.do_one_request = omap_des_crypt_req; return 0; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index cbeda59c6b19..2ef92301969f 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1074,6 +1074,10 @@ static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq) dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d", ctx->op, ctx->total, ctx->digcnt, final); + err = omap_sham_prepare_request(engine, areq); + if (err) + return err; + err = pm_runtime_resume_and_get(dd->dev); if (err < 0) { dev_err(dd->dev, "failed to get sync: %d\n", err); @@ -1350,8 +1354,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) } tctx->enginectx.op.do_one_request = omap_sham_hash_one_req; - tctx->enginectx.op.prepare_request = omap_sham_prepare_request; - tctx->enginectx.op.unprepare_request = NULL; return 0; } From c66c17a0f69b0e017bbc01d999a28ed96ee84826 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:22 +0800 Subject: [PATCH 116/149] crypto: rk3288 - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. 
Signed-off-by: Herbert Xu --- drivers/crypto/rockchip/rk3288_crypto_ahash.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index a78ff3dcd0b1..1519aa0a0f7c 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -240,14 +240,13 @@ static int rk_hash_prepare(struct crypto_engine *engine, void *breq) return 0; } -static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) +static void rk_hash_unprepare(struct crypto_engine *engine, void *breq) { struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); struct rk_crypto_info *rkc = rctx->dev; dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); - return 0; } static int rk_hash_run(struct crypto_engine *engine, void *breq) @@ -259,7 +258,7 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq) struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); struct scatterlist *sg = areq->src; struct rk_crypto_info *rkc = rctx->dev; - int err = 0; + int err; int i; u32 v; @@ -267,6 +266,10 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq) if (err) return err; + err = rk_hash_prepare(engine, breq); + if (err) + goto theend; + rctx->mode = 0; algt->stat_req++; @@ -327,6 +330,8 @@ theend: crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); + rk_hash_unprepare(engine, breq); + return 0; } @@ -350,8 +355,6 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) crypto_ahash_reqsize(tctx->fallback_tfm)); tctx->enginectx.op.do_one_request = rk_hash_run; - tctx->enginectx.op.prepare_request = rk_hash_prepare; - tctx->enginectx.op.unprepare_request = rk_hash_unprepare; return 0; } From 50c546d70af866ea5cbeb39f5007df96ea27c41e Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:24 +0800 Subject: [PATCH 117/149] crypto: jh1100 - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. Signed-off-by: Herbert Xu --- drivers/crypto/starfive/jh7110-aes.c | 54 ++++++--------------------- drivers/crypto/starfive/jh7110-hash.c | 7 ---- 2 files changed, 11 insertions(+), 50 deletions(-) diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c index 278dfa4aa743..777656cbb7ce 100644 --- a/drivers/crypto/starfive/jh7110-aes.c +++ b/drivers/crypto/starfive/jh7110-aes.c @@ -518,8 +518,13 @@ static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq) struct starfive_cryp_dev *cryp = ctx->cryp; u32 block[AES_BLOCK_32]; u32 stat; + int err; int i; + err = starfive_aes_prepare_req(req, NULL); + if (err) + return err; + /* * Write first plain/ciphertext block to start the module * then let irq tasklet handle the rest of the data blocks. 
@@ -538,15 +543,6 @@ static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq) return 0; } -static int starfive_aes_skcipher_prepare_req(struct crypto_engine *engine, - void *areq) -{ - struct skcipher_request *req = - container_of(areq, struct skcipher_request, base); - - return starfive_aes_prepare_req(req, NULL); -} - static int starfive_aes_init_tfm(struct crypto_skcipher *tfm) { struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -559,21 +555,10 @@ static int starfive_aes_init_tfm(struct crypto_skcipher *tfm) sizeof(struct skcipher_request)); ctx->enginectx.op.do_one_request = starfive_aes_do_one_req; - ctx->enginectx.op.prepare_request = starfive_aes_skcipher_prepare_req; - ctx->enginectx.op.unprepare_request = NULL; return 0; } -static void starfive_aes_exit_tfm(struct crypto_skcipher *tfm) -{ - struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); - - ctx->enginectx.op.do_one_request = NULL; - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.unprepare_request = NULL; -} - static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq) { struct aead_request *req = @@ -584,8 +569,13 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq struct starfive_cryp_request_ctx *rctx = ctx->rctx; u32 block[AES_BLOCK_32]; u32 stat; + int err; int i; + err = starfive_aes_prepare_req(NULL, req); + if (err) + return err; + if (!cryp->assoclen) goto write_text; @@ -625,14 +615,6 @@ finish_req: return 0; } -static int starfive_aes_aead_prepare_req(struct crypto_engine *engine, void *areq) -{ - struct aead_request *req = - container_of(areq, struct aead_request, base); - - return starfive_aes_prepare_req(NULL, req); -} - static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); @@ -657,8 +639,6 @@ static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm) sizeof(struct aead_request)); ctx->enginectx.op.do_one_request = starfive_aes_aead_do_one_req; - ctx->enginectx.op.prepare_request = starfive_aes_aead_prepare_req; - ctx->enginectx.op.unprepare_request = NULL; return 0; } @@ -667,14 +647,7 @@ static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); - if (ctx->aead_fbk) { - crypto_free_aead(ctx->aead_fbk); - ctx->aead_fbk = NULL; - } - - ctx->enginectx.op.do_one_request = NULL; - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.unprepare_request = NULL; + crypto_free_aead(ctx->aead_fbk); } static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags) @@ -874,7 +847,6 @@ static int starfive_aes_ccm_decrypt(struct aead_request *req) static struct skcipher_alg skcipher_algs[] = { { .init = starfive_aes_init_tfm, - .exit = starfive_aes_exit_tfm, .setkey = starfive_aes_setkey, .encrypt = starfive_aes_ecb_encrypt, .decrypt = starfive_aes_ecb_decrypt, @@ -892,7 +864,6 @@ static struct skcipher_alg skcipher_algs[] = { }, }, { .init = starfive_aes_init_tfm, - .exit = starfive_aes_exit_tfm, .setkey = starfive_aes_setkey, .encrypt = starfive_aes_cbc_encrypt, .decrypt = starfive_aes_cbc_decrypt, @@ -911,7 +882,6 @@ static struct skcipher_alg skcipher_algs[] = { }, }, { .init = starfive_aes_init_tfm, - .exit = starfive_aes_exit_tfm, .setkey = starfive_aes_setkey, .encrypt = starfive_aes_ctr_encrypt, .decrypt = starfive_aes_ctr_decrypt, @@ -930,7 +900,6 @@ static struct skcipher_alg skcipher_algs[] = { }, }, { .init = starfive_aes_init_tfm, - .exit = 
starfive_aes_exit_tfm, .setkey = starfive_aes_setkey, .encrypt = starfive_aes_cfb_encrypt, .decrypt = starfive_aes_cfb_decrypt, @@ -949,7 +918,6 @@ static struct skcipher_alg skcipher_algs[] = { }, }, { .init = starfive_aes_init_tfm, - .exit = starfive_aes_exit_tfm, .setkey = starfive_aes_setkey, .encrypt = starfive_aes_ofb_encrypt, .decrypt = starfive_aes_ofb_decrypt, diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c index 5064150b8a1c..7fe89cd13336 100644 --- a/drivers/crypto/starfive/jh7110-hash.c +++ b/drivers/crypto/starfive/jh7110-hash.c @@ -434,8 +434,6 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash, ctx->hash_mode = mode; ctx->enginectx.op.do_one_request = starfive_hash_one_request; - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.unprepare_request = NULL; return 0; } @@ -445,11 +443,6 @@ static void starfive_hash_exit_tfm(struct crypto_ahash *hash) struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); crypto_free_ahash(ctx->ahash_fbk); - - ctx->ahash_fbk = NULL; - ctx->enginectx.op.do_one_request = NULL; - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.unprepare_request = NULL; } static int starfive_hash_long_setkey(struct starfive_cryp_ctx *ctx, From 6912b79da85a445b85d4da37a9e5e49821b63e18 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:26 +0800 Subject: [PATCH 118/149] crypto: stm32 - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-cryp.c | 37 ++++++------------------------- 1 file changed, 7 insertions(+), 30 deletions(-) diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 6b8d731092a4..07e32b8dbe29 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -825,8 +825,6 @@ static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) } static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq); -static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine, - void *areq); static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm) { @@ -835,14 +833,10 @@ static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx)); ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req; - ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req; - ctx->enginectx.op.unprepare_request = NULL; return 0; } static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq); -static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, - void *areq); static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm) { @@ -851,8 +845,6 @@ static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm) tfm->reqsize = sizeof(struct stm32_cryp_reqctx); ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req; - ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req; - ctx->enginectx.op.unprepare_request = NULL; return 0; } @@ -1180,9 +1172,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, cryp = ctx->cryp; - if (!cryp) - return -ENODEV; - rctx = req ? 
skcipher_request_ctx(req) : aead_request_ctx(areq); rctx->mode &= FLG_MODE_MASK; @@ -1248,16 +1237,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, return ret; } -static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine, - void *areq) -{ - struct skcipher_request *req = container_of(areq, - struct skcipher_request, - base); - - return stm32_cryp_prepare_req(req, NULL); -} - static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq) { struct skcipher_request *req = container_of(areq, @@ -1270,15 +1249,8 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq) if (!cryp) return -ENODEV; - return stm32_cryp_cpu_start(cryp); -} - -static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq) -{ - struct aead_request *req = container_of(areq, struct aead_request, - base); - - return stm32_cryp_prepare_req(NULL, req); + return stm32_cryp_prepare_req(req, NULL) ?: + stm32_cryp_cpu_start(cryp); } static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) @@ -1287,10 +1259,15 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) base); struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct stm32_cryp *cryp = ctx->cryp; + int err; if (!cryp) return -ENODEV; + err = stm32_cryp_prepare_req(NULL, req); + if (err) + return err; + if (unlikely(!cryp->payload_in && !cryp->header_in)) { /* No input data to process: get tag and finish */ stm32_cryp_finish_req(cryp, 0); From fc0bdcb8303b70bc0c1b5fbf236a4882f254cf94 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:28 +0800 Subject: [PATCH 119/149] crypto: virtio - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 2 -- drivers/crypto/virtio/virtio_crypto_skcipher_algs.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 6963344f6a3a..ff3369ca319f 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -476,8 +476,6 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm) ctx->tfm = tfm; ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req; - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.unprepare_request = NULL; akcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_akcipher_request)); diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c index e5876286828b..71b8751ab5ab 100644 --- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c @@ -524,8 +524,6 @@ static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm) ctx->tfm = tfm; ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req; - ctx->enginectx.op.prepare_request = NULL; - ctx->enginectx.op.unprepare_request = NULL; return 0; } From be8b8a950f0ae601c8ff841b9dcc899f83054025 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:30 +0800 Subject: [PATCH 120/149] crypto: zynqmp - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. 
They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. Signed-off-by: Herbert Xu --- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c index bf1f421e05f2..ae7dd82abea1 100644 --- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c +++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c @@ -316,8 +316,6 @@ static int zynqmp_aes_aead_init(struct crypto_aead *aead) tfm_ctx->dev = drv_ctx->dev; tfm_ctx->engine_ctx.op.do_one_request = zynqmp_handle_aes_req; - tfm_ctx->engine_ctx.op.prepare_request = NULL; - tfm_ctx->engine_ctx.op.unprepare_request = NULL; tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.cra_name, 0, From bcd6e41d983621954dfc3f1f64249a55838b3e6a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:32 +0800 Subject: [PATCH 121/149] crypto: engine - Remove prepare/unprepare request The callbacks for prepare and unprepare request in crypto_engine is superfluous. They can be done directly from do_one_request. Move the code into do_one_request and remove the unused callbacks. Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 42 +---------------------------------------- include/crypto/engine.h | 6 ------ 2 files changed, 1 insertion(+), 47 deletions(-) diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 74fcc0897041..17f7955500a0 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -26,9 +26,6 @@ static void crypto_finalize_request(struct crypto_engine *engine, struct crypto_async_request *req, int err) { unsigned long flags; - bool finalize_req = false; - int ret; - struct crypto_engine_ctx *enginectx; /* * If hardware cannot enqueue more requests @@ -38,21 +35,11 @@ static void crypto_finalize_request(struct crypto_engine *engine, if (!engine->retry_support) { spin_lock_irqsave(&engine->queue_lock, flags); if (engine->cur_req == req) { - finalize_req = true; engine->cur_req = NULL; } spin_unlock_irqrestore(&engine->queue_lock, flags); } - if (finalize_req || engine->retry_support) { - enginectx = crypto_tfm_ctx(req->tfm); - if (enginectx->op.prepare_request && - enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, req); - if (ret) - dev_err(engine->dev, "failed to unprepare request\n"); - } - } lockdep_assert_in_softirq(); crypto_request_complete(req, err); @@ -141,20 +128,12 @@ start_request: ret = engine->prepare_crypt_hardware(engine); if (ret) { dev_err(engine->dev, "failed to prepare crypt hardware\n"); - goto req_err_2; + goto req_err_1; } } enginectx = crypto_tfm_ctx(async_req->tfm); - if (enginectx->op.prepare_request) { - ret = enginectx->op.prepare_request(engine, async_req); - if (ret) { - dev_err(engine->dev, "failed to prepare request: %d\n", - ret); - goto req_err_2; - } - } if (!enginectx->op.do_one_request) { dev_err(engine->dev, "failed to do request\n"); ret = -EINVAL; @@ -177,18 +156,6 @@ start_request: ret); goto req_err_1; } - /* - * If retry mechanism is supported, - * unprepare current request and - * enqueue it back into crypto-engine queue. 
- */ - if (enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, - async_req); - if (ret) - dev_err(engine->dev, - "failed to unprepare request\n"); - } spin_lock_irqsave(&engine->queue_lock, flags); /* * If hardware was unable to execute request, enqueue it @@ -204,13 +171,6 @@ start_request: goto retry; req_err_1: - if (enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, async_req); - if (ret) - dev_err(engine->dev, "failed to unprepare request\n"); - } - -req_err_2: crypto_request_complete(async_req, ret); retry: diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 2038764b30c2..1b02f69e0a79 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -78,15 +78,9 @@ struct crypto_engine { /* * struct crypto_engine_op - crypto hardware engine operations - * @prepare_request: do some preparation if needed before handling the current request - * @unprepare_request: undo any work done by prepare_request() * @do_one_request: do encryption for current request */ struct crypto_engine_op { - int (*prepare_request)(struct crypto_engine *engine, - void *areq); - int (*unprepare_request)(struct crypto_engine *engine, - void *areq); int (*do_one_request)(struct crypto_engine *engine, void *areq); }; From 1c27c0ca220b42fb8b727d68b58693b77cbd095d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:34 +0800 Subject: [PATCH 122/149] crypto: jh7110 - Include crypto/hash.h in header file The header file jh7110-cryp uses ahash_request without including crypto/hash.h. Fix that by adding the inclusion. Signed-off-by: Herbert Xu --- drivers/crypto/starfive/jh7110-cryp.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h index b6d809e8fe45..4462d1db9544 100644 --- a/drivers/crypto/starfive/jh7110-cryp.h +++ b/drivers/crypto/starfive/jh7110-cryp.h @@ -2,16 +2,16 @@ #ifndef __STARFIVE_STR_H__ #define __STARFIVE_STR_H__ +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include - #define STARFIVE_ALG_CR_OFFSET 0x0 #define STARFIVE_ALG_FIFO_OFFSET 0x4 #define STARFIVE_IE_MASK_OFFSET 0x8 From 68021dee251e72d87ebbf052acf69b3217c11383 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:36 +0800 Subject: [PATCH 123/149] crypto: engine - Move crypto inclusions out of header file The engine file does not need the actual crypto type definitions so move those header inclusions to where they are actually used. 
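For illustration only (generic names, not the exact engine.h contents): because the engine API only passes these requests around by pointer, forward declarations are sufficient in the header, and the full type definitions are pulled in only by the C files that actually dereference them:

    /* header: pointer-only use, so forward declarations are enough */
    struct aead_request;
    struct skcipher_request;

    int example_transfer_request(struct crypto_engine *engine,
                                 struct aead_request *req);

    /* C file: include the real definitions where members are accessed */
    #include <crypto/aead.h>
    #include <crypto/skcipher.h>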
Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 7 ++++++- include/crypto/engine.h | 17 +++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 17f7955500a0..ba43dfba2fa9 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -7,10 +7,15 @@ * Author: Baolin Wang */ +#include +#include +#include +#include +#include +#include #include #include #include -#include #include #include "internal.h" diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 1b02f69e0a79..643639c3227c 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -7,20 +7,17 @@ #ifndef _CRYPTO_ENGINE_H #define _CRYPTO_ENGINE_H -#include -#include +#include #include -#include +#include #include -#include -#include -#include -#include -#include -#include - +struct aead_request; +struct ahash_request; +struct akcipher_request; struct device; +struct kpp_request; +struct skcipher_request; #define ENGINE_NAME_LEN 30 /* From 2d6a79cc2ade0b3e67e66393a6183284e87b87f6 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:38 +0800 Subject: [PATCH 124/149] crypto: jh7110 - Include scatterwalk.h for struct scatter_walk Include crypto/scatterwalk.h explicitly instead of getting it through random header files. Signed-off-by: Herbert Xu --- drivers/crypto/starfive/jh7110-cryp.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h index 4462d1db9544..345a8d878761 100644 --- a/drivers/crypto/starfive/jh7110-cryp.h +++ b/drivers/crypto/starfive/jh7110-cryp.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include From 45c461c503a7a12f4c5efaff289be17a442aeefe Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:41 +0800 Subject: [PATCH 125/149] crypto: engine - Create internal/engine.h Create crypto/internal/engine.h to house details that should not be used by drivers. It is empty for the time being. Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 2 +- include/crypto/internal/engine.h | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 include/crypto/internal/engine.h diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index ba43dfba2fa9..a75162bc1bf4 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -9,8 +9,8 @@ #include #include -#include #include +#include #include #include #include diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h new file mode 100644 index 000000000000..ffa1bb39d5e4 --- /dev/null +++ b/include/crypto/internal/engine.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Crypto engine API + * + * Copyright (c) 2016 Baolin Wang + * Copyright (c) 2023 Herbert Xu + */ +#ifndef _CRYPTO_INTERNAL_ENGINE_H +#define _CRYPTO_INTERNAL_ENGINE_H + +#include + +#endif From b7b23ccbda5d956a8f605da35685ff5f53c0f3b9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:43 +0800 Subject: [PATCH 126/149] crypto: omap - Include internal/engine.h Inlucde internal/engine.h because this driver uses directly accesses attributes inside struct crypto_engine. 
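Illustration only (hypothetical code, not taken from omap-aes.c): once struct crypto_engine moves into the internal header later in this series, any code that dereferences engine members needs crypto/internal/engine.h, while API-only users keep including crypto/engine.h:

	#include <crypto/internal/engine.h>	/* full struct crypto_engine definition */

	/* Hypothetical helper: reading engine->busy needs the struct definition,
	 * which is exactly what the internal header will provide. */
	static bool my_engine_busy(struct crypto_engine *engine)
	{
		return engine->busy;
	}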
Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index d6fb8676f6cc..ad0d8db086db 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -13,28 +13,26 @@ #define prn(num) pr_debug(#num "=%d\n", num) #define prx(num) pr_debug(#num "=%x\n", num) -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include #include -#include +#include +#include +#include +#include +#include +#include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include #include "omap-crypto.h" #include "omap-aes.h" From 4ac1a2d88d8ff2b7d6decb40f712bf9ffddcd8c9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:45 +0800 Subject: [PATCH 127/149] crypto: caam - Include internal/engine.h Inlucde internal/engine.h because this driver uses directly accesses attributes inside struct crypto_engine. Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 4 ++-- drivers/crypto/caam/caamhash.c | 2 +- drivers/crypto/caam/caampkc.c | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index feb86013dbf6..da8182ee86fe 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -56,9 +56,9 @@ #include "sg_sw_sec4.h" #include "key_gen.h" #include "caamalg_desc.h" -#include -#include #include +#include +#include #include #include #include diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 641c3afd59ca..9ef25387f6b6 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -65,7 +65,7 @@ #include "sg_sw_sec4.h" #include "key_gen.h" #include "caamhash_desc.h" -#include +#include #include #include diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 7e08af751e4e..1809c97d04be 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -16,6 +16,7 @@ #include "desc_constr.h" #include "sg_sw_sec4.h" #include "caampkc.h" +#include #include #include From c1091e2baef65b24a39db6a61de316605d3505c5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:47 +0800 Subject: [PATCH 128/149] crypto: engine - Move struct crypto_engine into internal/engine.h Most drivers should not access the internal details of struct crypto_engine. Move it into the internal header file. 
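A hedged sketch of the intended split (my_dev and my_queue_request are placeholders, not code from any driver): a driver that only queues work can treat the engine as an opaque pointer and keep including the public header:

	#include <crypto/engine.h>		/* opaque handle is sufficient */
	#include <crypto/skcipher.h>

	struct my_dev {
		struct crypto_engine *engine;
	};

	/* Queueing a request never dereferences the engine, so
	 * crypto/internal/engine.h is not needed here. */
	static int my_queue_request(struct my_dev *dd, struct skcipher_request *req)
	{
		return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
	}

Only the engine core and the few drivers that poke at engine internals move to the internal header.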
Signed-off-by: Herbert Xu --- include/crypto/engine.h | 58 +----------------------------- include/crypto/internal/engine.h | 61 ++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 57 deletions(-) diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 643639c3227c..cc9c7fd1c4d9 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -7,72 +7,16 @@ #ifndef _CRYPTO_ENGINE_H #define _CRYPTO_ENGINE_H -#include -#include -#include #include struct aead_request; struct ahash_request; struct akcipher_request; +struct crypto_engine; struct device; struct kpp_request; struct skcipher_request; -#define ENGINE_NAME_LEN 30 -/* - * struct crypto_engine - crypto hardware engine - * @name: the engine name - * @idling: the engine is entering idle state - * @busy: request pump is busy - * @running: the engine is on working - * @retry_support: indication that the hardware allows re-execution - * of a failed backlog request - * crypto-engine, in head position to keep order - * @list: link with the global crypto engine list - * @queue_lock: spinlock to synchronise access to request queue - * @queue: the crypto queue of the engine - * @rt: whether this queue is set to run as a realtime task - * @prepare_crypt_hardware: a request will soon arrive from the queue - * so the subsystem requests the driver to prepare the hardware - * by issuing this call - * @unprepare_crypt_hardware: there are currently no more requests on the - * queue so the subsystem notifies the driver that it may relax the - * hardware by issuing this call - * @do_batch_requests: execute a batch of requests. Depends on multiple - * requests support. - * @kworker: kthread worker struct for request pump - * @pump_requests: work struct for scheduling work to the request pump - * @priv_data: the engine private data - * @cur_req: the current request which is on processing - */ -struct crypto_engine { - char name[ENGINE_NAME_LEN]; - bool idling; - bool busy; - bool running; - - bool retry_support; - - struct list_head list; - spinlock_t queue_lock; - struct crypto_queue queue; - struct device *dev; - - bool rt; - - int (*prepare_crypt_hardware)(struct crypto_engine *engine); - int (*unprepare_crypt_hardware)(struct crypto_engine *engine); - int (*do_batch_requests)(struct crypto_engine *engine); - - - struct kthread_worker *kworker; - struct kthread_work pump_requests; - - void *priv_data; - struct crypto_async_request *cur_req; -}; - /* * struct crypto_engine_op - crypto hardware engine operations * @do_one_request: do encryption for current request diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h index ffa1bb39d5e4..fbf4be56cf12 100644 --- a/include/crypto/internal/engine.h +++ b/include/crypto/internal/engine.h @@ -8,6 +8,67 @@ #ifndef _CRYPTO_INTERNAL_ENGINE_H #define _CRYPTO_INTERNAL_ENGINE_H +#include #include +#include +#include +#include + +#define ENGINE_NAME_LEN 30 + +struct device; + +/* + * struct crypto_engine - crypto hardware engine + * @name: the engine name + * @idling: the engine is entering idle state + * @busy: request pump is busy + * @running: the engine is on working + * @retry_support: indication that the hardware allows re-execution + * of a failed backlog request + * crypto-engine, in head position to keep order + * @list: link with the global crypto engine list + * @queue_lock: spinlock to synchronise access to request queue + * @queue: the crypto queue of the engine + * @rt: whether this queue is set to run as a realtime task + * 
@prepare_crypt_hardware: a request will soon arrive from the queue + * so the subsystem requests the driver to prepare the hardware + * by issuing this call + * @unprepare_crypt_hardware: there are currently no more requests on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @do_batch_requests: execute a batch of requests. Depends on multiple + * requests support. + * @kworker: kthread worker struct for request pump + * @pump_requests: work struct for scheduling work to the request pump + * @priv_data: the engine private data + * @cur_req: the current request which is on processing + */ +struct crypto_engine { + char name[ENGINE_NAME_LEN]; + bool idling; + bool busy; + bool running; + + bool retry_support; + + struct list_head list; + spinlock_t queue_lock; + struct crypto_queue queue; + struct device *dev; + + bool rt; + + int (*prepare_crypt_hardware)(struct crypto_engine *engine); + int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + int (*do_batch_requests)(struct crypto_engine *engine); + + + struct kthread_worker *kworker; + struct kthread_work pump_requests; + + void *priv_data; + struct crypto_async_request *cur_req; +}; #endif From e5e7eb023f24653b07329162b6359283b3a03a20 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:49 +0800 Subject: [PATCH 129/149] crypto: engine - Move crypto_engine_ops from request into crypto_alg Rather than having the callback in the request, move it into the crypto_alg object. This avoids having crypto_engine look into the request context is private to the driver. Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 213 +++++++++++++++++++++++++++++++++++++--- include/crypto/engine.h | 59 ++++++++++- 2 files changed, 256 insertions(+), 16 deletions(-) diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index a75162bc1bf4..abfb1e6bfa48 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -7,20 +7,30 @@ * Author: Baolin Wang */ -#include -#include -#include +#include +#include #include -#include -#include +#include +#include +#include #include #include #include +#include +#include #include #include "internal.h" #define CRYPTO_ENGINE_MAX_QLEN 10 +/* Temporary algorithm flag used to indicate an updated driver. 
*/ +#define CRYPTO_ALG_ENGINE 0x200 + +struct crypto_engine_alg { + struct crypto_alg base; + struct crypto_engine_op op; +}; + /** * crypto_finalize_request - finalize one request if the request is done * @engine: the hardware engine @@ -64,6 +74,8 @@ static void crypto_pump_requests(struct crypto_engine *engine, bool in_kthread) { struct crypto_async_request *async_req, *backlog; + struct crypto_engine_alg *alg; + struct crypto_engine_op *op; unsigned long flags; bool was_busy = false; int ret; @@ -137,15 +149,22 @@ start_request: } } - enginectx = crypto_tfm_ctx(async_req->tfm); + if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) { + alg = container_of(async_req->tfm->__crt_alg, + struct crypto_engine_alg, base); + op = &alg->op; + } else { + enginectx = crypto_tfm_ctx(async_req->tfm); + op = &enginectx->op; - if (!enginectx->op.do_one_request) { - dev_err(engine->dev, "failed to do request\n"); - ret = -EINVAL; - goto req_err_1; + if (!op->do_one_request) { + dev_err(engine->dev, "failed to do request\n"); + ret = -EINVAL; + goto req_err_1; + } } - ret = enginectx->op.do_one_request(engine, async_req); + ret = op->do_one_request(engine, async_req); /* Request unsuccessfully executed by hardware */ if (ret < 0) { @@ -556,5 +575,177 @@ int crypto_engine_exit(struct crypto_engine *engine) } EXPORT_SYMBOL_GPL(crypto_engine_exit); +int crypto_engine_register_aead(struct aead_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + + alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; + + return crypto_register_aead(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_aead); + +void crypto_engine_unregister_aead(struct aead_engine_alg *alg) +{ + crypto_unregister_aead(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead); + +int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_engine_register_aead(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + crypto_engine_unregister_aeads(algs, i); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_engine_register_aeads); + +void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_engine_unregister_aead(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads); + +int crypto_engine_register_ahash(struct ahash_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + + alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE; + + return crypto_register_ahash(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_ahash); + +void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg) +{ + crypto_unregister_ahash(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash); + +int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_engine_register_ahash(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + crypto_engine_unregister_ahashes(algs, i); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes); + +void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs, + int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_engine_unregister_ahash(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes); + +int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + + alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; + + return 
crypto_register_akcipher(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher); + +void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg) +{ + crypto_unregister_akcipher(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher); + +int crypto_engine_register_kpp(struct kpp_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + + alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; + + return crypto_register_kpp(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_kpp); + +void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg) +{ + crypto_unregister_kpp(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp); + +int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg) +{ + if (!alg->op.do_one_request) + return -EINVAL; + + alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE; + + return crypto_register_skcipher(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher); + +void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg) +{ + return crypto_unregister_skcipher(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher); + +int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs, + int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_engine_register_skcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + crypto_engine_unregister_skciphers(algs, i); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers); + +void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs, + int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_engine_unregister_skcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Crypto hardware engine framework"); diff --git a/include/crypto/engine.h b/include/crypto/engine.h index cc9c7fd1c4d9..cf57e732566b 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -7,15 +7,15 @@ #ifndef _CRYPTO_ENGINE_H #define _CRYPTO_ENGINE_H +#include +#include +#include +#include +#include #include -struct aead_request; -struct ahash_request; -struct akcipher_request; struct crypto_engine; struct device; -struct kpp_request; -struct skcipher_request; /* * struct crypto_engine_op - crypto hardware engine operations @@ -30,6 +30,31 @@ struct crypto_engine_ctx { struct crypto_engine_op op; }; +struct aead_engine_alg { + struct aead_alg base; + struct crypto_engine_op op; +}; + +struct ahash_engine_alg { + struct ahash_alg base; + struct crypto_engine_op op; +}; + +struct akcipher_engine_alg { + struct akcipher_alg base; + struct crypto_engine_op op; +}; + +struct kpp_engine_alg { + struct kpp_alg base; + struct crypto_engine_op op; +}; + +struct skcipher_engine_alg { + struct skcipher_alg base; + struct crypto_engine_op op; +}; + int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine, struct aead_request *req); int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine, @@ -59,4 +84,28 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool rt, int qlen); int crypto_engine_exit(struct crypto_engine *engine); +int crypto_engine_register_aead(struct aead_engine_alg *alg); +void crypto_engine_unregister_aead(struct aead_engine_alg *alg); +int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count); +void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count); + +int crypto_engine_register_ahash(struct 
ahash_engine_alg *alg); +void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg); +int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count); +void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs, + int count); + +int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg); +void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg); + +int crypto_engine_register_kpp(struct kpp_engine_alg *alg); +void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg); + +int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg); +void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg); +int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs, + int count); +void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs, + int count); + #endif /* _CRYPTO_ENGINE_H */ From 07e34cd39282af37160c978abf18308d5bb287e3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:51 +0800 Subject: [PATCH 130/149] crypto: sun8i-ce - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. Signed-off-by: Herbert Xu --- .../allwinner/sun8i-ce/sun8i-ce-cipher.c | 25 +-- .../crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 171 +++++++++++------- .../crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 98 +++++----- drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 13 +- 4 files changed, 181 insertions(+), 126 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 573a08fa7afa..8d4c42863a62 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -29,7 +29,7 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) struct sun8i_ce_alg_template *algt; unsigned int todo, len; - algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); + algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG || sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) { @@ -92,13 +92,18 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq) struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); int err; -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct sun8i_ce_alg_template *algt; - algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); - algt->stat_fb++; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ce_alg_template *algt __maybe_unused; + + algt = container_of(alg, struct sun8i_ce_alg_template, + alg.skcipher.base); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + algt->stat_fb++; #endif + } skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, @@ -133,7 +138,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req int ns = sg_nents_for_len(areq->src, areq->cryptlen); int nd = sg_nents_for_len(areq->dst, areq->cryptlen); - algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); + algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, crypto_tfm_alg_name(areq->base.tfm), @@ -355,7 
+360,7 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine, dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); } -static int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq) +int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq) { int err = sun8i_ce_cipher_prepare(engine, areq); @@ -416,7 +421,7 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx)); - algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); + algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); op->ce = algt->ce; op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); @@ -433,8 +438,6 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), CRYPTO_MAX_ALG_NAME); - op->enginectx.op.do_one_request = sun8i_ce_cipher_do_one; - err = pm_runtime_get_sync(op->ce->dev); if (err < 0) goto error_pm; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index 07ea0cc82b16..1c2c32d0568e 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -9,21 +9,25 @@ * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ + +#include +#include +#include +#include #include -#include #include #include +#include #include #include #include +#include #include #include #include #include #include #include -#include -#include #include "sun8i-ce.h" @@ -277,7 +281,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ce_algo_id = CE_ID_CIPHER_AES, .ce_blockmode = CE_ID_OP_CBC, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-sun8i-ce", @@ -298,13 +302,16 @@ static struct sun8i_ce_alg_template ce_algs[] = { .setkey = sun8i_ce_aes_setkey, .encrypt = sun8i_ce_skencrypt, .decrypt = sun8i_ce_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ce_cipher_do_one, + }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ce_algo_id = CE_ID_CIPHER_AES, .ce_blockmode = CE_ID_OP_ECB, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-sun8i-ce", @@ -324,13 +331,16 @@ static struct sun8i_ce_alg_template ce_algs[] = { .setkey = sun8i_ce_aes_setkey, .encrypt = sun8i_ce_skencrypt, .decrypt = sun8i_ce_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ce_cipher_do_one, + }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ce_algo_id = CE_ID_CIPHER_DES3, .ce_blockmode = CE_ID_OP_CBC, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3-sun8i-ce", @@ -351,13 +361,16 @@ static struct sun8i_ce_alg_template ce_algs[] = { .setkey = sun8i_ce_des3_setkey, .encrypt = sun8i_ce_skencrypt, .decrypt = sun8i_ce_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ce_cipher_do_one, + }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ce_algo_id = CE_ID_CIPHER_DES3, .ce_blockmode = CE_ID_OP_ECB, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3-sun8i-ce", @@ -377,12 +390,15 @@ static struct sun8i_ce_alg_template ce_algs[] = { .setkey = sun8i_ce_des3_setkey, .encrypt = sun8i_ce_skencrypt, .decrypt = sun8i_ce_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ce_cipher_do_one, + }, }, #ifdef 
CONFIG_CRYPTO_DEV_SUN8I_CE_HASH { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_MD5, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, @@ -390,6 +406,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, + .init_tfm = sun8i_ce_hash_init_tfm, + .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct md5_state), @@ -404,15 +422,17 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ce_hash_crainit, - .cra_exit = sun8i_ce_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ce_hash_run, + }, + }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA1, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, @@ -420,6 +440,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, + .init_tfm = sun8i_ce_hash_init_tfm, + .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), @@ -434,15 +456,16 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ce_hash_crainit, - .cra_exit = sun8i_ce_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ce_hash_run, + }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA224, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, @@ -450,6 +473,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, + .init_tfm = sun8i_ce_hash_init_tfm, + .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), @@ -464,15 +489,16 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ce_hash_crainit, - .cra_exit = sun8i_ce_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ce_hash_run, + }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA256, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, @@ -480,6 +506,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, + .init_tfm = sun8i_ce_hash_init_tfm, + .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), @@ -494,15 +522,16 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ce_hash_crainit, - .cra_exit = sun8i_ce_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ce_hash_run, + }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA384, - .alg.hash = { + 
.alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, @@ -510,6 +539,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, + .init_tfm = sun8i_ce_hash_init_tfm, + .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), @@ -524,15 +555,16 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ce_hash_crainit, - .cra_exit = sun8i_ce_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ce_hash_run, + }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA512, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, @@ -540,6 +572,8 @@ static struct sun8i_ce_alg_template ce_algs[] = { .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, + .init_tfm = sun8i_ce_hash_init_tfm, + .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), @@ -554,11 +588,12 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ce_hash_crainit, - .cra_exit = sun8i_ce_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ce_hash_run, + }, }, #endif #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG @@ -582,14 +617,18 @@ static struct sun8i_ce_alg_template ce_algs[] = { #endif }; -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v) { - struct sun8i_ce_dev *ce = seq->private; + struct sun8i_ce_dev *ce __maybe_unused = seq->private; unsigned int i; for (i = 0; i < MAXFLOW; i++) - seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req); + seq_printf(seq, "Channel %d: nreq %lu\n", i, +#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG + ce->chanlist[i].stat_req); +#else + 0ul); +#endif for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { if (!ce_algs[i].ce) @@ -597,8 +636,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v) switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", - ce_algs[i].alg.skcipher.base.cra_driver_name, - ce_algs[i].alg.skcipher.base.cra_name, + ce_algs[i].alg.skcipher.base.base.cra_driver_name, + ce_algs[i].alg.skcipher.base.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_fb); seq_printf(seq, "\tLast fallback is: %s\n", ce_algs[i].fbname); @@ -621,8 +660,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v) break; case CRYPTO_ALG_TYPE_AHASH: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", - ce_algs[i].alg.hash.halg.base.cra_driver_name, - ce_algs[i].alg.hash.halg.base.cra_name, + ce_algs[i].alg.hash.base.halg.base.cra_driver_name, + ce_algs[i].alg.hash.base.halg.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_fb); seq_printf(seq, "\tLast fallback is: %s\n", ce_algs[i].fbname); @@ -643,7 +682,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v) break; } } -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG +#if defined(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) && \ + defined(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG) seq_printf(seq, "HWRNG %lu %lu\n", ce->hwrng_stat_req, 
ce->hwrng_stat_bytes); #endif @@ -651,7 +691,6 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs); -#endif static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i) { @@ -839,7 +878,7 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce) if (ce_method == CE_ID_NOTSUPP) { dev_dbg(ce->dev, "DEBUG: Algo of %s not supported\n", - ce_algs[i].alg.skcipher.base.cra_name); + ce_algs[i].alg.skcipher.base.base.cra_name); ce_algs[i].ce = NULL; break; } @@ -847,16 +886,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce) ce_method = ce->variant->op_mode[id]; if (ce_method == CE_ID_NOTSUPP) { dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n", - ce_algs[i].alg.skcipher.base.cra_name); + ce_algs[i].alg.skcipher.base.base.cra_name); ce_algs[i].ce = NULL; break; } dev_info(ce->dev, "Register %s\n", - ce_algs[i].alg.skcipher.base.cra_name); - err = crypto_register_skcipher(&ce_algs[i].alg.skcipher); + ce_algs[i].alg.skcipher.base.base.cra_name); + err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher); if (err) { dev_err(ce->dev, "ERROR: Fail to register %s\n", - ce_algs[i].alg.skcipher.base.cra_name); + ce_algs[i].alg.skcipher.base.base.cra_name); ce_algs[i].ce = NULL; return err; } @@ -867,16 +906,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce) if (ce_method == CE_ID_NOTSUPP) { dev_info(ce->dev, "DEBUG: Algo of %s not supported\n", - ce_algs[i].alg.hash.halg.base.cra_name); + ce_algs[i].alg.hash.base.halg.base.cra_name); ce_algs[i].ce = NULL; break; } dev_info(ce->dev, "Register %s\n", - ce_algs[i].alg.hash.halg.base.cra_name); - err = crypto_register_ahash(&ce_algs[i].alg.hash); + ce_algs[i].alg.hash.base.halg.base.cra_name); + err = crypto_engine_register_ahash(&ce_algs[i].alg.hash); if (err) { dev_err(ce->dev, "ERROR: Fail to register %s\n", - ce_algs[i].alg.hash.halg.base.cra_name); + ce_algs[i].alg.hash.base.halg.base.cra_name); ce_algs[i].ce = NULL; return err; } @@ -916,13 +955,13 @@ static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce) switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: dev_info(ce->dev, "Unregister %d %s\n", i, - ce_algs[i].alg.skcipher.base.cra_name); - crypto_unregister_skcipher(&ce_algs[i].alg.skcipher); + ce_algs[i].alg.skcipher.base.base.cra_name); + crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher); break; case CRYPTO_ALG_TYPE_AHASH: dev_info(ce->dev, "Unregister %d %s\n", i, - ce_algs[i].alg.hash.halg.base.cra_name); - crypto_unregister_ahash(&ce_algs[i].alg.hash); + ce_algs[i].alg.hash.base.halg.base.cra_name); + crypto_engine_unregister_ahash(&ce_algs[i].alg.hash); break; case CRYPTO_ALG_TYPE_RNG: dev_info(ce->dev, "Unregister %d %s\n", i, @@ -1007,13 +1046,21 @@ static int sun8i_ce_probe(struct platform_device *pdev) pm_runtime_put_sync(ce->dev); + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { + struct dentry *dbgfs_dir __maybe_unused; + struct dentry *dbgfs_stats __maybe_unused; + + /* Ignore error of debugfs */ + dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL); + dbgfs_stats = debugfs_create_file("stats", 0444, + dbgfs_dir, ce, + &sun8i_ce_debugfs_fops); + #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - /* Ignore error of debugfs */ - ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL); - ce->dbgfs_stats = debugfs_create_file("stats", 0444, - ce->dbgfs_dir, ce, - &sun8i_ce_debugfs_fops); + ce->dbgfs_dir = dbgfs_dir; + ce->dbgfs_stats = dbgfs_stats; #endif + } return 0; error_alg: diff --git 
a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 04d7d890de58..d358334e5981 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -9,46 +9,46 @@ * * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ -#include -#include -#include -#include + #include +#include #include #include -#include +#include +#include +#include +#include +#include +#include +#include #include "sun8i-ce.h" -int sun8i_ce_hash_crainit(struct crypto_tfm *tfm) +int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm) { - struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm); - struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm); + struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ce_alg_template *algt; int err; - memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx)); - - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); op->ce = algt->ce; - op->enginectx.op.do_one_request = sun8i_ce_hash_run; - /* FALLBACK */ - op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0, + op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(algt->ce->dev, "Fallback driver could no be loaded\n"); return PTR_ERR(op->fallback_tfm); } - if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm)) - algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm); + crypto_ahash_set_statesize(tfm, + crypto_ahash_statesize(op->fallback_tfm)); - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + crypto_ahash_set_reqsize(tfm, sizeof(struct sun8i_ce_hash_reqctx) + crypto_ahash_reqsize(op->fallback_tfm)); - memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), + memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm), CRYPTO_MAX_ALG_NAME); err = pm_runtime_get_sync(op->ce->dev); @@ -61,9 +61,9 @@ error_pm: return err; } -void sun8i_ce_hash_craexit(struct crypto_tfm *tfm) +void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm) { - struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm); + struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); crypto_free_ahash(tfmctx->fallback_tfm); pm_runtime_put_sync_suspend(tfmctx->ce->dev); @@ -112,20 +112,22 @@ int sun8i_ce_hash_final(struct ahash_request *areq) struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); - struct sun8i_ce_alg_template *algt; -#endif ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.result = areq->result; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { + struct sun8i_ce_alg_template *algt __maybe_unused; + struct ahash_alg *alg = crypto_ahash_alg(tfm); + + algt = container_of(alg, struct sun8i_ce_alg_template, + alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); - algt->stat_fb++; + algt->stat_fb++; #endif + } return crypto_ahash_final(&rctx->fallback_req); } @@ -150,10 +152,6 @@ int sun8i_ce_hash_finup(struct 
ahash_request *areq) struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); - struct sun8i_ce_alg_template *algt; -#endif ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & @@ -162,10 +160,17 @@ int sun8i_ce_hash_finup(struct ahash_request *areq) rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { + struct sun8i_ce_alg_template *algt __maybe_unused; + struct ahash_alg *alg = crypto_ahash_alg(tfm); + + algt = container_of(alg, struct sun8i_ce_alg_template, + alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); - algt->stat_fb++; + algt->stat_fb++; #endif + } return crypto_ahash_finup(&rctx->fallback_req); } @@ -175,10 +180,6 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq) struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); - struct sun8i_ce_alg_template *algt; -#endif ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & @@ -187,10 +188,17 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq) rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { + struct sun8i_ce_alg_template *algt __maybe_unused; + struct ahash_alg *alg = crypto_ahash_alg(tfm); + + algt = container_of(alg, struct sun8i_ce_alg_template, + alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); - algt->stat_fb++; + algt->stat_fb++; #endif + } return crypto_ahash_digest(&rctx->fallback_req); } @@ -202,7 +210,7 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq) struct sun8i_ce_alg_template *algt; struct scatterlist *sg; - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); if (areq->nbytes == 0) { algt->stat_fb_len0++; @@ -251,7 +259,7 @@ int sun8i_ce_hash_digest(struct ahash_request *areq) return sun8i_ce_hash_digest_fb(areq); } - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); ce = algt->ce; e = sun8i_ce_get_engine_number(ce); @@ -343,11 +351,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) dma_addr_t addr_res, addr_pad; int ns = sg_nents_for_len(areq->src, areq->nbytes); - algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); ce = algt->ce; - bs = algt->alg.hash.halg.base.cra_blocksize; - digestsize = algt->alg.hash.halg.digestsize; + bs = algt->alg.hash.base.halg.base.cra_blocksize; + digestsize = algt->alg.hash.base.halg.digestsize; if (digestsize == SHA224_DIGEST_SIZE) digestsize = SHA256_DIGEST_SIZE; if (digestsize == 
SHA384_DIGEST_SIZE) @@ -452,14 +460,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) chan->timeout = areq->nbytes; - err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm)); + err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm)); dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); - memcpy(areq->result, result, algt->alg.hash.halg.digestsize); + memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize); theend: kfree(buf); kfree(result); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 4742b48ef52e..93d4985def87 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -265,14 +265,12 @@ struct sun8i_cipher_req_ctx { /* * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM - * @enginectx: crypto_engine used by this TFM * @key: pointer to key data * @keylen: len of the key * @ce: pointer to the private data of driver handling this TFM * @fallback_tfm: pointer to the fallback TFM */ struct sun8i_cipher_tfm_ctx { - struct crypto_engine_ctx enginectx; u32 *key; u32 keylen; struct sun8i_ce_dev *ce; @@ -281,12 +279,10 @@ struct sun8i_cipher_tfm_ctx { /* * struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM - * @enginectx: crypto_engine used by this TFM * @ce: pointer to the private data of driver handling this TFM * @fallback_tfm: pointer to the fallback TFM */ struct sun8i_ce_hash_tfm_ctx { - struct crypto_engine_ctx enginectx; struct sun8i_ce_dev *ce; struct crypto_ahash *fallback_tfm; }; @@ -329,8 +325,8 @@ struct sun8i_ce_alg_template { u32 ce_blockmode; struct sun8i_ce_dev *ce; union { - struct skcipher_alg skcipher; - struct ahash_alg hash; + struct skcipher_engine_alg skcipher; + struct ahash_engine_alg hash; struct rng_alg rng; } alg; unsigned long stat_req; @@ -353,6 +349,7 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int sun8i_ce_cipher_init(struct crypto_tfm *tfm); void sun8i_ce_cipher_exit(struct crypto_tfm *tfm); +int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq); int sun8i_ce_skdecrypt(struct skcipher_request *areq); int sun8i_ce_skencrypt(struct skcipher_request *areq); @@ -360,8 +357,8 @@ int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce); int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name); -int sun8i_ce_hash_crainit(struct crypto_tfm *tfm); -void sun8i_ce_hash_craexit(struct crypto_tfm *tfm); +int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm); +void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm); int sun8i_ce_hash_init(struct ahash_request *areq); int sun8i_ce_hash_export(struct ahash_request *areq, void *out); int sun8i_ce_hash_import(struct ahash_request *areq, const void *in); From 4c19e8fb5e9ce0b2b535274150605c78a4617bf9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:53 +0800 Subject: [PATCH 131/149] crypto: sun8i-ss - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. 
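The conversion follows the same shape as the sun8i-ce patch above; a stripped-down, hypothetical entry (all my_* names and struct my_tfm_ctx are placeholders, not driver code) looks roughly like:

	struct my_tfm_ctx {
		struct crypto_skcipher *fallback_tfm;
	};

	static int my_do_one_request(struct crypto_engine *engine, void *areq);

	static struct skcipher_engine_alg my_cbc_aes_alg = {
		.base = {
			.base = {
				.cra_name		= "cbc(aes)",
				.cra_driver_name	= "cbc-aes-mydrv",
				.cra_priority		= 400,
				.cra_flags		= CRYPTO_ALG_ASYNC |
							  CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize		= AES_BLOCK_SIZE,
				.cra_ctxsize		= sizeof(struct my_tfm_ctx),
				.cra_module		= THIS_MODULE,
			},
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= my_aes_setkey,
			.encrypt	= my_encrypt,
			.decrypt	= my_decrypt,
		},
		.op = {
			/* The callback now lives with the algorithm, not the TFM ctx. */
			.do_one_request	= my_do_one_request,
		},
	};

Registration and unregistration then go through crypto_engine_register_skcipher() / crypto_engine_unregister_skcipher(), so the engine core can find .op without reaching into driver-private context.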
Signed-off-by: Herbert Xu --- .../allwinner/sun8i-ss/sun8i-ss-cipher.c | 25 +-- .../crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 154 +++++++++++------- .../crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 112 +++++++------ drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 17 +- 4 files changed, 177 insertions(+), 131 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 2758dadf74eb..7fa359725ec7 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -24,7 +24,7 @@ static bool sun8i_ss_need_fallback(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); + struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); struct scatterlist *in_sg = areq->src; struct scatterlist *out_sg = areq->dst; struct scatterlist *sg; @@ -93,13 +93,18 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); int err; -#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct sun8i_ss_alg_template *algt; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ss_alg_template *algt __maybe_unused; - algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); - algt->stat_fb++; + algt = container_of(alg, struct sun8i_ss_alg_template, + alg.skcipher.base); + +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + algt->stat_fb++; #endif + } + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, areq->base.complete, areq->base.data); @@ -193,7 +198,7 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen); int i; - algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, crypto_tfm_alg_name(areq->base.tfm), @@ -324,7 +329,7 @@ theend: return err; } -static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq) +int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq) { int err; struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); @@ -390,7 +395,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx)); - algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); + algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); op->ss = algt->ss; op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); @@ -408,8 +413,6 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), CRYPTO_MAX_ALG_NAME); - op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request; - err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) { dev_err(op->ss->dev, "pm error %d\n", err); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 3dd844b40ff7..62c001c9efc2 100644 --- 
a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -9,10 +9,14 @@ * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ + +#include +#include +#include #include -#include #include #include +#include #include #include #include @@ -23,8 +27,6 @@ #include #include #include -#include -#include #include "sun8i-ss.h" @@ -168,7 +170,7 @@ static struct sun8i_ss_alg_template ss_algs[] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ss_algo_id = SS_ID_CIPHER_AES, .ss_blockmode = SS_ID_OP_CBC, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-sun8i-ss", @@ -189,13 +191,16 @@ static struct sun8i_ss_alg_template ss_algs[] = { .setkey = sun8i_ss_aes_setkey, .encrypt = sun8i_ss_skencrypt, .decrypt = sun8i_ss_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ss_handle_cipher_request, + }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ss_algo_id = SS_ID_CIPHER_AES, .ss_blockmode = SS_ID_OP_ECB, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-sun8i-ss", @@ -215,13 +220,16 @@ static struct sun8i_ss_alg_template ss_algs[] = { .setkey = sun8i_ss_aes_setkey, .encrypt = sun8i_ss_skencrypt, .decrypt = sun8i_ss_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ss_handle_cipher_request, + }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ss_algo_id = SS_ID_CIPHER_DES3, .ss_blockmode = SS_ID_OP_CBC, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3-sun8i-ss", @@ -242,13 +250,16 @@ static struct sun8i_ss_alg_template ss_algs[] = { .setkey = sun8i_ss_des3_setkey, .encrypt = sun8i_ss_skencrypt, .decrypt = sun8i_ss_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ss_handle_cipher_request, + }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ss_algo_id = SS_ID_CIPHER_DES3, .ss_blockmode = SS_ID_OP_ECB, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3-sun8i-ss", @@ -268,7 +279,10 @@ static struct sun8i_ss_alg_template ss_algs[] = { .setkey = sun8i_ss_des3_setkey, .encrypt = sun8i_ss_skencrypt, .decrypt = sun8i_ss_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sun8i_ss_handle_cipher_request, + }, }, #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG { @@ -292,7 +306,7 @@ static struct sun8i_ss_alg_template ss_algs[] = { #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_MD5, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, @@ -300,6 +314,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct md5_state), @@ -314,15 +330,16 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ss_hash_crainit, - .cra_exit = sun8i_ss_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_SHA1, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = 
sun8i_ss_hash_final, @@ -330,6 +347,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), @@ -344,15 +363,16 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ss_hash_crainit, - .cra_exit = sun8i_ss_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_SHA224, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, @@ -360,6 +380,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), @@ -374,15 +396,16 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ss_hash_crainit, - .cra_exit = sun8i_ss_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_SHA256, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, @@ -390,6 +413,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), @@ -404,15 +429,16 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ss_hash_crainit, - .cra_exit = sun8i_ss_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_SHA1, - .alg.hash = { + .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, @@ -420,6 +446,8 @@ static struct sun8i_ss_alg_template ss_algs[] = { .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, + .init_tfm = sun8i_ss_hash_init_tfm, + .exit_tfm = sun8i_ss_hash_exit_tfm, .setkey = sun8i_ss_hmac_setkey, .halg = { .digestsize = SHA1_DIGEST_SIZE, @@ -435,23 +463,28 @@ static struct sun8i_ss_alg_template ss_algs[] = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, - .cra_init = sun8i_ss_hash_crainit, - .cra_exit = sun8i_ss_hash_craexit, } } - } + }, + .alg.hash.op = { + .do_one_request = sun8i_ss_hash_run, + }, }, #endif }; -#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) { - struct sun8i_ss_dev *ss = seq->private; + struct sun8i_ss_dev *ss __maybe_unused = seq->private; unsigned int i; for (i = 0; i < MAXFLOW; i++) - seq_printf(seq, "Channel %d: nreq 
%lu\n", i, ss->flows[i].stat_req); + seq_printf(seq, "Channel %d: nreq %lu\n", i, +#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG + ss->flows[i].stat_req); +#else + 0ul); +#endif for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { if (!ss_algs[i].ss) @@ -459,8 +492,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", - ss_algs[i].alg.skcipher.base.cra_driver_name, - ss_algs[i].alg.skcipher.base.cra_name, + ss_algs[i].alg.skcipher.base.base.cra_driver_name, + ss_algs[i].alg.skcipher.base.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_fb); seq_printf(seq, "\tLast fallback is: %s\n", @@ -482,8 +515,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) break; case CRYPTO_ALG_TYPE_AHASH: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", - ss_algs[i].alg.hash.halg.base.cra_driver_name, - ss_algs[i].alg.hash.halg.base.cra_name, + ss_algs[i].alg.hash.base.halg.base.cra_driver_name, + ss_algs[i].alg.hash.base.halg.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_fb); seq_printf(seq, "\tLast fallback is: %s\n", ss_algs[i].fbname); @@ -502,7 +535,6 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs); -#endif static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) { @@ -659,7 +691,7 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss) if (ss_method == SS_ID_NOTSUPP) { dev_info(ss->dev, "DEBUG: Algo of %s not supported\n", - ss_algs[i].alg.skcipher.base.cra_name); + ss_algs[i].alg.skcipher.base.base.cra_name); ss_algs[i].ss = NULL; break; } @@ -667,16 +699,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss) ss_method = ss->variant->op_mode[id]; if (ss_method == SS_ID_NOTSUPP) { dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n", - ss_algs[i].alg.skcipher.base.cra_name); + ss_algs[i].alg.skcipher.base.base.cra_name); ss_algs[i].ss = NULL; break; } dev_info(ss->dev, "DEBUG: Register %s\n", - ss_algs[i].alg.skcipher.base.cra_name); - err = crypto_register_skcipher(&ss_algs[i].alg.skcipher); + ss_algs[i].alg.skcipher.base.base.cra_name); + err = crypto_engine_register_skcipher(&ss_algs[i].alg.skcipher); if (err) { dev_err(ss->dev, "Fail to register %s\n", - ss_algs[i].alg.skcipher.base.cra_name); + ss_algs[i].alg.skcipher.base.base.cra_name); ss_algs[i].ss = NULL; return err; } @@ -695,16 +727,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss) if (ss_method == SS_ID_NOTSUPP) { dev_info(ss->dev, "DEBUG: Algo of %s not supported\n", - ss_algs[i].alg.hash.halg.base.cra_name); + ss_algs[i].alg.hash.base.halg.base.cra_name); ss_algs[i].ss = NULL; break; } dev_info(ss->dev, "Register %s\n", - ss_algs[i].alg.hash.halg.base.cra_name); - err = crypto_register_ahash(&ss_algs[i].alg.hash); + ss_algs[i].alg.hash.base.halg.base.cra_name); + err = crypto_engine_register_ahash(&ss_algs[i].alg.hash); if (err) { dev_err(ss->dev, "ERROR: Fail to register %s\n", - ss_algs[i].alg.hash.halg.base.cra_name); + ss_algs[i].alg.hash.base.halg.base.cra_name); ss_algs[i].ss = NULL; return err; } @@ -727,8 +759,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss) switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: dev_info(ss->dev, "Unregister %d %s\n", i, - ss_algs[i].alg.skcipher.base.cra_name); - crypto_unregister_skcipher(&ss_algs[i].alg.skcipher); + ss_algs[i].alg.skcipher.base.base.cra_name); + crypto_engine_unregister_skcipher(&ss_algs[i].alg.skcipher); break; case 
CRYPTO_ALG_TYPE_RNG: dev_info(ss->dev, "Unregister %d %s\n", i, @@ -737,8 +769,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss) break; case CRYPTO_ALG_TYPE_AHASH: dev_info(ss->dev, "Unregister %d %s\n", i, - ss_algs[i].alg.hash.halg.base.cra_name); - crypto_unregister_ahash(&ss_algs[i].alg.hash); + ss_algs[i].alg.hash.base.halg.base.cra_name); + crypto_engine_unregister_ahash(&ss_algs[i].alg.hash); break; } } @@ -851,13 +883,21 @@ static int sun8i_ss_probe(struct platform_device *pdev) pm_runtime_put_sync(ss->dev); + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct dentry *dbgfs_dir __maybe_unused; + struct dentry *dbgfs_stats __maybe_unused; + + /* Ignore error of debugfs */ + dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL); + dbgfs_stats = debugfs_create_file("stats", 0444, + dbgfs_dir, ss, + &sun8i_ss_debugfs_fops); + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - /* Ignore error of debugfs */ - ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL); - ss->dbgfs_stats = debugfs_create_file("stats", 0444, - ss->dbgfs_dir, ss, - &sun8i_ss_debugfs_fops); + ss->dbgfs_dir = dbgfs_dir; + ss->dbgfs_stats = dbgfs_stats; #endif + } return 0; error_alg: diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index e6865b49b1df..d70b105dcfa1 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -9,16 +9,21 @@ * * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ -#include -#include -#include -#include -#include + #include +#include +#include #include #include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include #include "sun8i-ss.h" static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key, @@ -60,14 +65,11 @@ int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash); - struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg); - struct sun8i_ss_alg_template *algt; int digestsize, i; int bs = crypto_ahash_blocksize(ahash); int ret; - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); - digestsize = algt->alg.hash.halg.digestsize; + digestsize = crypto_ahash_digestsize(ahash); if (keylen > bs) { ret = sun8i_ss_hashkey(tfmctx, key, keylen); @@ -107,36 +109,33 @@ err_opad: return ret; } -int sun8i_ss_hash_crainit(struct crypto_tfm *tfm) +int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm) { - struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm); - struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm); + struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt; int err; - memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx)); - - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); op->ss = algt->ss; - op->enginectx.op.do_one_request = sun8i_ss_hash_run; - /* FALLBACK */ - op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0, + op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(algt->ss->dev, "Fallback driver could no be loaded\n"); return PTR_ERR(op->fallback_tfm); } - if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm)) - algt->alg.hash.halg.statesize = 
crypto_ahash_statesize(op->fallback_tfm); + crypto_ahash_set_statesize(tfm, + crypto_ahash_statesize(op->fallback_tfm)); - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + crypto_ahash_set_reqsize(tfm, sizeof(struct sun8i_ss_hash_reqctx) + crypto_ahash_reqsize(op->fallback_tfm)); - memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME); + memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm), + CRYPTO_MAX_ALG_NAME); err = pm_runtime_get_sync(op->ss->dev); if (err < 0) @@ -148,9 +147,9 @@ error_pm: return err; } -void sun8i_ss_hash_craexit(struct crypto_tfm *tfm) +void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm) { - struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); kfree_sensitive(tfmctx->ipad); kfree_sensitive(tfmctx->opad); @@ -202,20 +201,23 @@ int sun8i_ss_hash_final(struct ahash_request *areq) struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); - struct sun8i_ss_alg_template *algt; -#endif ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.result = areq->result; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt __maybe_unused; + + algt = container_of(alg, struct sun8i_ss_alg_template, + alg.hash.base); + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); - algt->stat_fb++; + algt->stat_fb++; #endif + } return crypto_ahash_final(&rctx->fallback_req); } @@ -240,10 +242,6 @@ int sun8i_ss_hash_finup(struct ahash_request *areq) struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); - struct sun8i_ss_alg_template *algt; -#endif ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & @@ -252,10 +250,18 @@ int sun8i_ss_hash_finup(struct ahash_request *areq) rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt __maybe_unused; + + algt = container_of(alg, struct sun8i_ss_alg_template, + alg.hash.base); + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); - algt->stat_fb++; + algt->stat_fb++; #endif + } return crypto_ahash_finup(&rctx->fallback_req); } @@ -265,10 +271,6 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq) struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); -#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); - struct sun8i_ss_alg_template *algt; -#endif ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); 
rctx->fallback_req.base.flags = areq->base.flags & @@ -277,10 +279,18 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq) rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct sun8i_ss_alg_template *algt __maybe_unused; + + algt = container_of(alg, struct sun8i_ss_alg_template, + alg.hash.base); + #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); - algt->stat_fb++; + algt->stat_fb++; #endif + } return crypto_ahash_digest(&rctx->fallback_req); } @@ -347,11 +357,11 @@ static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss, static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt; struct scatterlist *sg; - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); if (areq->nbytes == 0) { algt->stat_fb_len++; @@ -396,8 +406,8 @@ static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq) int sun8i_ss_hash_digest(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt; struct sun8i_ss_dev *ss; struct crypto_engine *engine; @@ -406,7 +416,7 @@ int sun8i_ss_hash_digest(struct ahash_request *areq) if (sun8i_ss_hash_need_fallback(areq)) return sun8i_ss_hash_digest_fb(areq); - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); ss = algt->ss; e = sun8i_ss_get_engine_number(ss); @@ -482,8 +492,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); + struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt; struct sun8i_ss_dev *ss; struct scatterlist *sg; @@ -502,10 +512,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) */ int hmac = 0; - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); ss = algt->ss; - digestsize = algt->alg.hash.halg.digestsize; + digestsize = crypto_ahash_digestsize(tfm); if (digestsize == SHA224_DIGEST_SIZE) digestsize = SHA256_DIGEST_SIZE; @@ -698,7 +708,7 @@ err_dma_result: } if (!err) - memcpy(areq->result, result, algt->alg.hash.halg.digestsize); + memcpy(areq->result, result, crypto_ahash_digestsize(tfm)); theend: local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h index bfe305261e9a..ae66eb45fb24 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h @@ -201,16 +201,12 
@@ struct sun8i_cipher_req_ctx { /* * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM - * @enginectx: crypto_engine used by this TFM * @key: pointer to key data * @keylen: len of the key * @ss: pointer to the private data of driver handling this TFM * @fallback_tfm: pointer to the fallback TFM - * - * enginectx must be the first element */ struct sun8i_cipher_tfm_ctx { - struct crypto_engine_ctx enginectx; u32 *key; u32 keylen; struct sun8i_ss_dev *ss; @@ -229,14 +225,10 @@ struct sun8i_ss_rng_tfm_ctx { /* * struct sun8i_ss_hash_tfm_ctx - context for an ahash TFM - * @enginectx: crypto_engine used by this TFM * @fallback_tfm: pointer to the fallback TFM * @ss: pointer to the private data of driver handling this TFM - * - * enginectx must be the first element */ struct sun8i_ss_hash_tfm_ctx { - struct crypto_engine_ctx enginectx; struct crypto_ahash *fallback_tfm; struct sun8i_ss_dev *ss; u8 *ipad; @@ -279,9 +271,9 @@ struct sun8i_ss_alg_template { u32 ss_blockmode; struct sun8i_ss_dev *ss; union { - struct skcipher_alg skcipher; + struct skcipher_engine_alg skcipher; struct rng_alg rng; - struct ahash_alg hash; + struct ahash_engine_alg hash; } alg; unsigned long stat_req; unsigned long stat_fb; @@ -299,6 +291,7 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); int sun8i_ss_cipher_init(struct crypto_tfm *tfm); void sun8i_ss_cipher_exit(struct crypto_tfm *tfm); +int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq); int sun8i_ss_skdecrypt(struct skcipher_request *areq); int sun8i_ss_skencrypt(struct skcipher_request *areq); @@ -311,8 +304,8 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen int sun8i_ss_prng_init(struct crypto_tfm *tfm); void sun8i_ss_prng_exit(struct crypto_tfm *tfm); -int sun8i_ss_hash_crainit(struct crypto_tfm *tfm); -void sun8i_ss_hash_craexit(struct crypto_tfm *tfm); +int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm); +void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm); int sun8i_ss_hash_init(struct ahash_request *areq); int sun8i_ss_hash_export(struct ahash_request *areq, void *out); int sun8i_ss_hash_import(struct ahash_request *areq, const void *in); From 4dd4d5e486ebdeb48dbc558237d4ba8aab8917d5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:55 +0800 Subject: [PATCH 132/149] crypto: amlogic - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. 
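
For reference, the shape of the conversion is sketched below.  This is a
condensed illustration drawn from the hunks that follow, not additional
code introduced by the patch; the meson_* names are the ones this driver
already uses.

Before, the per-TFM context embedded a struct crypto_engine_ctx and the
init path installed the request callback at runtime:

	struct meson_cipher_tfm_ctx {
		struct crypto_engine_ctx enginectx;
		/* ... key, keylen, keymode, mc, fallback_tfm ... */
	};

	/* in meson_cipher_init() */
	op->enginectx.op.do_one_request = meson_handle_cipher_request;

After, the callback is declared next to the algorithm itself and the
engine-aware registration helpers are used, so the context member and
the runtime assignment go away:

	static struct meson_alg_template mc_algs[] = { {
		/* ... */
		.alg.skcipher.base = {
			/* ordinary struct skcipher_alg fields */
		},
		.alg.skcipher.op = {
			.do_one_request = meson_handle_cipher_request,
		},
	}, };

	err = crypto_engine_register_skcipher(&mc_algs[i].alg.skcipher);
	/* ... */
	crypto_engine_unregister_skcipher(&mc_algs[i].alg.skcipher);

Because struct skcipher_engine_alg wraps the old algorithm object,
accesses such as cra_name now go through an extra ".base", e.g.
mc_algs[i].alg.skcipher.base.base.cra_name.
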
Signed-off-by: Herbert Xu --- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 11 ++-- drivers/crypto/amlogic/amlogic-gxl-core.c | 60 ++++++++++++++------- drivers/crypto/amlogic/amlogic-gxl.h | 5 +- 3 files changed, 47 insertions(+), 29 deletions(-) diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 03b6586b71ef..3308406612fc 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -65,7 +65,7 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq) struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct meson_alg_template *algt; - algt = container_of(alg, struct meson_alg_template, alg.skcipher); + algt = container_of(alg, struct meson_alg_template, alg.skcipher.base); algt->stat_fb++; #endif skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); @@ -101,7 +101,7 @@ static int meson_cipher(struct skcipher_request *areq) void *backup_iv = NULL, *bkeyiv; u32 v; - algt = container_of(alg, struct meson_alg_template, alg.skcipher); + algt = container_of(alg, struct meson_alg_template, alg.skcipher.base); dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), @@ -258,8 +258,7 @@ theend: return err; } -static int meson_handle_cipher_request(struct crypto_engine *engine, - void *areq) +int meson_handle_cipher_request(struct crypto_engine *engine, void *areq) { int err; struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); @@ -318,7 +317,7 @@ int meson_cipher_init(struct crypto_tfm *tfm) memset(op, 0, sizeof(struct meson_cipher_tfm_ctx)); - algt = container_of(alg, struct meson_alg_template, alg.skcipher); + algt = container_of(alg, struct meson_alg_template, alg.skcipher.base); op->mc = algt->mc; op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); @@ -331,8 +330,6 @@ int meson_cipher_init(struct crypto_tfm *tfm) sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm); - op->enginectx.op.do_one_request = meson_handle_cipher_request; - return 0; } diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 937187027ad5..9846d791c956 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -6,17 +6,20 @@ * * Core file which registers crypto algorithms supported by the hardware. 
*/ + +#include +#include #include -#include -#include +#include +#include #include +#include #include +#include #include #include #include #include -#include -#include #include "amlogic-gxl.h" @@ -47,7 +50,7 @@ static struct meson_alg_template mc_algs[] = { { .type = CRYPTO_ALG_TYPE_SKCIPHER, .blockmode = MESON_OPMODE_CBC, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-gxl", @@ -68,12 +71,15 @@ static struct meson_alg_template mc_algs[] = { .setkey = meson_aes_setkey, .encrypt = meson_skencrypt, .decrypt = meson_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = meson_handle_cipher_request, + }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .blockmode = MESON_OPMODE_ECB, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-gxl", @@ -93,33 +99,43 @@ static struct meson_alg_template mc_algs[] = { .setkey = meson_aes_setkey, .encrypt = meson_skencrypt, .decrypt = meson_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = meson_handle_cipher_request, + }, }, }; -#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG static int meson_debugfs_show(struct seq_file *seq, void *v) { - struct meson_dev *mc = seq->private; + struct meson_dev *mc __maybe_unused = seq->private; int i; for (i = 0; i < MAXFLOW; i++) - seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req); + seq_printf(seq, "Channel %d: nreq %lu\n", i, +#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG + mc->chanlist[i].stat_req); +#else + 0ul); +#endif for (i = 0; i < ARRAY_SIZE(mc_algs); i++) { switch (mc_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s %lu %lu\n", - mc_algs[i].alg.skcipher.base.cra_driver_name, - mc_algs[i].alg.skcipher.base.cra_name, + mc_algs[i].alg.skcipher.base.base.cra_driver_name, + mc_algs[i].alg.skcipher.base.base.cra_name, +#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG mc_algs[i].stat_req, mc_algs[i].stat_fb); +#else + 0ul, 0ul); +#endif break; } } return 0; } DEFINE_SHOW_ATTRIBUTE(meson_debugfs); -#endif static void meson_free_chanlist(struct meson_dev *mc, int i) { @@ -183,10 +199,10 @@ static int meson_register_algs(struct meson_dev *mc) mc_algs[i].mc = mc; switch (mc_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: - err = crypto_register_skcipher(&mc_algs[i].alg.skcipher); + err = crypto_engine_register_skcipher(&mc_algs[i].alg.skcipher); if (err) { dev_err(mc->dev, "Fail to register %s\n", - mc_algs[i].alg.skcipher.base.cra_name); + mc_algs[i].alg.skcipher.base.base.cra_name); mc_algs[i].mc = NULL; return err; } @@ -206,7 +222,7 @@ static void meson_unregister_algs(struct meson_dev *mc) continue; switch (mc_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: - crypto_unregister_skcipher(&mc_algs[i].alg.skcipher); + crypto_engine_unregister_skcipher(&mc_algs[i].alg.skcipher); break; } } @@ -264,10 +280,16 @@ static int meson_crypto_probe(struct platform_device *pdev) if (err) goto error_alg; + if (IS_ENABLED(CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG)) { + struct dentry *dbgfs_dir; + + dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL); + debugfs_create_file("stats", 0444, dbgfs_dir, mc, &meson_debugfs_fops); + #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG - mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL); - debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops); + mc->dbgfs_dir = dbgfs_dir; #endif + } return 0; error_alg: diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index 8c0746a1d6d4..1013a666c932 100644 --- 
a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -114,7 +114,6 @@ struct meson_cipher_req_ctx { /* * struct meson_cipher_tfm_ctx - context for a skcipher TFM - * @enginectx: crypto_engine used by this TFM * @key: pointer to key data * @keylen: len of the key * @keymode: The keymode(type and size of key) associated with this TFM @@ -122,7 +121,6 @@ struct meson_cipher_req_ctx { * @fallback_tfm: pointer to the fallback TFM */ struct meson_cipher_tfm_ctx { - struct crypto_engine_ctx enginectx; u32 *key; u32 keylen; u32 keymode; @@ -143,7 +141,7 @@ struct meson_alg_template { u32 type; u32 blockmode; union { - struct skcipher_alg skcipher; + struct skcipher_engine_alg skcipher; } alg; struct meson_dev *mc; #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG @@ -160,3 +158,4 @@ int meson_cipher_init(struct crypto_tfm *tfm); void meson_cipher_exit(struct crypto_tfm *tfm); int meson_skdecrypt(struct skcipher_request *areq); int meson_skencrypt(struct skcipher_request *areq); +int meson_handle_cipher_request(struct crypto_engine *engine, void *areq); From 304506f299b324f188b624dca2d4f29722746432 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:57 +0800 Subject: [PATCH 133/149] crypto: aspeed - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-acry.c | 39 +++--- drivers/crypto/aspeed/aspeed-hace-crypto.c | 132 +++++++++++++++------ drivers/crypto/aspeed/aspeed-hace-hash.c | 97 +++++++++++---- drivers/crypto/aspeed/aspeed-hace.c | 9 +- drivers/crypto/aspeed/aspeed-hace.h | 30 ++--- 5 files changed, 202 insertions(+), 105 deletions(-) diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c index 5ae529ce6806..2a970367e686 100644 --- a/drivers/crypto/aspeed/aspeed-acry.c +++ b/drivers/crypto/aspeed/aspeed-acry.c @@ -2,25 +2,26 @@ /* * Copyright 2021 Aspeed Technology Inc. */ -#include -#include #include #include #include #include #include -#include +#include +#include +#include +#include +#include +#include #include -#include -#include #include #include -#include -#include -#include -#include -#include +#include +#include +#include #include +#include +#include #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG #define ACRY_DBG(d, fmt, ...) 
\ @@ -112,7 +113,6 @@ struct aspeed_acry_dev { }; struct aspeed_acry_ctx { - struct crypto_engine_ctx enginectx; struct aspeed_acry_dev *acry_dev; struct rsa_key key; @@ -131,7 +131,7 @@ struct aspeed_acry_ctx { struct aspeed_acry_alg { struct aspeed_acry_dev *acry_dev; - struct akcipher_alg akcipher; + struct akcipher_engine_alg akcipher; }; enum aspeed_rsa_key_mode { @@ -577,7 +577,7 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm) const char *name = crypto_tfm_alg_name(&tfm->base); struct aspeed_acry_alg *acry_alg; - acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher); + acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base); ctx->acry_dev = acry_alg->acry_dev; @@ -589,8 +589,6 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm) return PTR_ERR(ctx->fallback_tfm); } - ctx->enginectx.op.do_one_request = aspeed_acry_do_request; - return 0; } @@ -603,7 +601,7 @@ static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm) static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = { { - .akcipher = { + .akcipher.base = { .encrypt = aspeed_acry_rsa_enc, .decrypt = aspeed_acry_rsa_dec, .sign = aspeed_acry_rsa_dec, @@ -625,6 +623,9 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = { .cra_ctxsize = sizeof(struct aspeed_acry_ctx), }, }, + .akcipher.op = { + .do_one_request = aspeed_acry_do_request, + }, }, }; @@ -634,10 +635,10 @@ static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev) for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) { aspeed_acry_akcipher_algs[i].acry_dev = acry_dev; - rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher); + rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher); if (rc) { ACRY_DBG(acry_dev, "Failed to register %s\n", - aspeed_acry_akcipher_algs[i].akcipher.base.cra_name); + aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name); } } } @@ -647,7 +648,7 @@ static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev) int i; for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) - crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher); + crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher); } /* ACRY interrupt service routine. */ diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c index 8d6d9ecb3a28..f0eddb7854e5 100644 --- a/drivers/crypto/aspeed/aspeed-hace-crypto.c +++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c @@ -4,6 +4,17 @@ */ #include "aspeed-hace.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG #define CIPHER_DBG(h, fmt, ...) 
\ @@ -696,7 +707,7 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm) struct aspeed_hace_alg *crypto_alg; - crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher); + crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher.base); ctx->hace_dev = crypto_alg->hace_dev; ctx->start = aspeed_hace_skcipher_trigger; @@ -713,8 +724,6 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) + crypto_skcipher_reqsize(ctx->fallback_tfm)); - ctx->enginectx.op.do_one_request = aspeed_crypto_do_request; - return 0; } @@ -729,7 +738,7 @@ static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm) static struct aspeed_hace_alg aspeed_crypto_algs[] = { { - .alg.skcipher = { + .alg.skcipher.base = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aspeed_aes_setkey, @@ -749,10 +758,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -773,10 +785,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -797,10 +812,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -821,10 +839,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = aspeed_des_setkey, @@ -844,10 +865,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, @@ -868,10 +892,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, @@ -892,10 +919,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, @@ -916,10 +946,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = 
aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = aspeed_des_setkey, @@ -939,10 +972,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, @@ -963,10 +999,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, @@ -987,10 +1026,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, @@ -1011,13 +1053,16 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, }; static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, @@ -1037,10 +1082,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, @@ -1060,10 +1108,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, { - .alg.skcipher = { + .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, @@ -1083,7 +1134,10 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } - } + }, + .alg.skcipher.op = { + .do_one_request = aspeed_crypto_do_request, + }, }, }; @@ -1093,13 +1147,13 @@ void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) int i; for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) - crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher); + crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher); if (hace_dev->version != AST2600_VERSION) return; for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) - crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); + crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); } void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) @@ -1110,10 +1164,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) { aspeed_crypto_algs[i].hace_dev = hace_dev; - rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher); + rc = 
crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher); if (rc) { CIPHER_DBG(hace_dev, "Failed to register %s\n", - aspeed_crypto_algs[i].alg.skcipher.base.cra_name); + aspeed_crypto_algs[i].alg.skcipher.base.base.cra_name); } } @@ -1122,10 +1176,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) { aspeed_crypto_algs_g6[i].hace_dev = hace_dev; - rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); + rc = crypto_engine_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); if (rc) { CIPHER_DBG(hace_dev, "Failed to register %s\n", - aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name); + aspeed_crypto_algs_g6[i].alg.skcipher.base.base.cra_name); } } } diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c index f8c96568142e..abc459af2ac8 100644 --- a/drivers/crypto/aspeed/aspeed-hace-hash.c +++ b/drivers/crypto/aspeed/aspeed-hace-hash.c @@ -4,6 +4,17 @@ */ #include "aspeed-hace.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG #define AHASH_DBG(h, fmt, ...) \ @@ -858,7 +869,7 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm) struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm); struct aspeed_hace_alg *ast_alg; - ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash); + ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base); tctx->hace_dev = ast_alg->hace_dev; tctx->flags = 0; @@ -880,8 +891,6 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm) } } - tctx->enginectx.op.do_one_request = aspeed_ahash_do_one; - return 0; } @@ -919,7 +928,7 @@ static int aspeed_sham_import(struct ahash_request *req, const void *in) static struct aspeed_hace_alg aspeed_ahash_algs[] = { { - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -946,9 +955,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -975,9 +987,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1004,10 +1019,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { .alg_base = "sha1", - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1036,10 +1054,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { .alg_base = "sha224", - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1068,10 +1089,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { .alg_base = "sha256", - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1100,12 +1124,15 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = { } } }, + 
.alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, }; static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { { - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1132,9 +1159,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1161,9 +1191,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sha512s_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1190,9 +1223,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sha512s_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1219,10 +1255,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { .alg_base = "sha384", - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1251,10 +1290,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { .alg_base = "sha512", - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1283,10 +1325,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { .alg_base = "sha512_224", - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sha512s_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1315,10 +1360,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, { .alg_base = "sha512_256", - .alg.ahash = { + .alg.ahash.base = { .init = aspeed_sha512s_init, .update = aspeed_sham_update, .final = aspeed_sham_final, @@ -1347,6 +1395,9 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { } } }, + .alg.ahash.op = { + .do_one_request = aspeed_ahash_do_one, + }, }, }; @@ -1355,13 +1406,13 @@ void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev) int i; for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) - crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash); + crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash); if (hace_dev->version != AST2600_VERSION) return; for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) - crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); + crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); } void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev) @@ -1372,10 +1423,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev) for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) { aspeed_ahash_algs[i].hace_dev = hace_dev; - rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash); + rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash); if (rc) { AHASH_DBG(hace_dev, "Failed to register %s\n", - aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name); + 
aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name); } } @@ -1384,10 +1435,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev) for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) { aspeed_ahash_algs_g6[i].hace_dev = hace_dev; - rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); + rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); if (rc) { AHASH_DBG(hace_dev, "Failed to register %s\n", - aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name); + aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name); } } } diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c index d2871e1de9c2..8f7aab82e1d8 100644 --- a/drivers/crypto/aspeed/aspeed-hace.c +++ b/drivers/crypto/aspeed/aspeed-hace.c @@ -3,7 +3,14 @@ * Copyright (c) 2021 Aspeed Technology Inc. */ +#include "aspeed-hace.h" +#include #include +#include +#include +#include +#include +#include #include #include #include @@ -11,8 +18,6 @@ #include #include -#include "aspeed-hace.h" - #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG #define HACE_DBG(d, fmt, ...) \ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h index 05d0a15d546d..68f70e01fccb 100644 --- a/drivers/crypto/aspeed/aspeed-hace.h +++ b/drivers/crypto/aspeed/aspeed-hace.h @@ -2,25 +2,14 @@ #ifndef __ASPEED_HACE_H__ #define __ASPEED_HACE_H__ -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include +#include #include +#include +#include +#include +#include /***************************** * * @@ -144,6 +133,7 @@ HACE_CMD_OFB | HACE_CMD_CTR) struct aspeed_hace_dev; +struct scatterlist; typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *); @@ -178,8 +168,6 @@ struct aspeed_sha_hmac_ctx { }; struct aspeed_sham_ctx { - struct crypto_engine_ctx enginectx; - struct aspeed_hace_dev *hace_dev; unsigned long flags; /* hmac flag */ @@ -235,8 +223,6 @@ struct aspeed_engine_crypto { }; struct aspeed_cipher_ctx { - struct crypto_engine_ctx enginectx; - struct aspeed_hace_dev *hace_dev; int key_len; u8 key[AES_MAX_KEYLENGTH]; @@ -275,8 +261,8 @@ struct aspeed_hace_alg { const char *alg_base; union { - struct skcipher_alg skcipher; - struct ahash_alg ahash; + struct skcipher_engine_alg skcipher; + struct ahash_engine_alg ahash; } alg; }; From d33a6a3f5a68b01acf0d79b626c9b857cc9719cc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:54:59 +0800 Subject: [PATCH 134/149] crypto: aspeed - Remove non-standard sha512 algorithms Algorithms must never be added to a driver unless there is a generic implementation. These truncated versions of sha512 slipped through. Remove them as they are useless. 
Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-hace-hash.c | 212 ----------------------- 1 file changed, 212 deletions(-) diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c index abc459af2ac8..0b6e49c06eff 100644 --- a/drivers/crypto/aspeed/aspeed-hace-hash.c +++ b/drivers/crypto/aspeed/aspeed-hace-hash.c @@ -59,28 +59,6 @@ static const __be64 sha512_iv[8] = { cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7) }; -static const __be32 sha512_224_iv[16] = { - cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL), - cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL), - cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL), - cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL), - cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL), - cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL), - cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL), - cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL) -}; - -static const __be32 sha512_256_iv[16] = { - cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL), - cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL), - cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL), - cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL), - cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL), - cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL), - cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL), - cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL) -}; - /* The purpose of this padding is to ensure that the padded message is a * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512). * The bit "1" is appended at the end of the message followed by @@ -765,62 +743,6 @@ static int aspeed_sham_init(struct ahash_request *req) return 0; } -static int aspeed_sha512s_init(struct ahash_request *req) -{ - struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); - struct aspeed_hace_dev *hace_dev = tctx->hace_dev; - struct aspeed_sha_hmac_ctx *bctx = tctx->base; - - AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm)); - - rctx->cmd = HASH_CMD_ACC_MODE; - rctx->flags = 0; - - switch (crypto_ahash_digestsize(tfm)) { - case SHA224_DIGEST_SIZE: - rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 | - HASH_CMD_SHA_SWAP; - rctx->flags |= SHA_FLAGS_SHA512_224; - rctx->digsize = SHA224_DIGEST_SIZE; - rctx->block_size = SHA512_BLOCK_SIZE; - rctx->sha_iv = sha512_224_iv; - rctx->ivsize = 64; - memcpy(rctx->digest, sha512_224_iv, rctx->ivsize); - break; - case SHA256_DIGEST_SIZE: - rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 | - HASH_CMD_SHA_SWAP; - rctx->flags |= SHA_FLAGS_SHA512_256; - rctx->digsize = SHA256_DIGEST_SIZE; - rctx->block_size = SHA512_BLOCK_SIZE; - rctx->sha_iv = sha512_256_iv; - rctx->ivsize = 64; - memcpy(rctx->digest, sha512_256_iv, rctx->ivsize); - break; - default: - dev_warn(tctx->hace_dev->dev, "digest size %d not support\n", - crypto_ahash_digestsize(tfm)); - return -EINVAL; - } - - rctx->bufcnt = 0; - rctx->total = 0; - rctx->digcnt[0] = 0; - rctx->digcnt[1] = 0; - - /* HMAC init */ - if (tctx->flags & SHA_FLAGS_HMAC) { - rctx->digcnt[0] = rctx->block_size; - rctx->bufcnt = rctx->block_size; - memcpy(rctx->buffer, bctx->ipad, rctx->block_size); - rctx->flags |= SHA_FLAGS_HMAC; - } - - return 0; -} - static int aspeed_sham_digest(struct ahash_request *req) { return aspeed_sham_init(req) ? 
: aspeed_sham_finup(req); @@ -1195,70 +1117,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { .do_one_request = aspeed_ahash_do_one, }, }, - { - .alg.ahash.base = { - .init = aspeed_sha512s_init, - .update = aspeed_sham_update, - .final = aspeed_sham_final, - .finup = aspeed_sham_finup, - .digest = aspeed_sham_digest, - .export = aspeed_sham_export, - .import = aspeed_sham_import, - .halg = { - .digestsize = SHA224_DIGEST_SIZE, - .statesize = sizeof(struct aspeed_sham_reqctx), - .base = { - .cra_name = "sha512_224", - .cra_driver_name = "aspeed-sha512_224", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct aspeed_sham_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = aspeed_sham_cra_init, - .cra_exit = aspeed_sham_cra_exit, - } - } - }, - .alg.ahash.op = { - .do_one_request = aspeed_ahash_do_one, - }, - }, - { - .alg.ahash.base = { - .init = aspeed_sha512s_init, - .update = aspeed_sham_update, - .final = aspeed_sham_final, - .finup = aspeed_sham_finup, - .digest = aspeed_sham_digest, - .export = aspeed_sham_export, - .import = aspeed_sham_import, - .halg = { - .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct aspeed_sham_reqctx), - .base = { - .cra_name = "sha512_256", - .cra_driver_name = "aspeed-sha512_256", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct aspeed_sham_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = aspeed_sham_cra_init, - .cra_exit = aspeed_sham_cra_exit, - } - } - }, - .alg.ahash.op = { - .do_one_request = aspeed_ahash_do_one, - }, - }, { .alg_base = "sha384", .alg.ahash.base = { @@ -1329,76 +1187,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { .do_one_request = aspeed_ahash_do_one, }, }, - { - .alg_base = "sha512_224", - .alg.ahash.base = { - .init = aspeed_sha512s_init, - .update = aspeed_sham_update, - .final = aspeed_sham_final, - .finup = aspeed_sham_finup, - .digest = aspeed_sham_digest, - .setkey = aspeed_sham_setkey, - .export = aspeed_sham_export, - .import = aspeed_sham_import, - .halg = { - .digestsize = SHA224_DIGEST_SIZE, - .statesize = sizeof(struct aspeed_sham_reqctx), - .base = { - .cra_name = "hmac(sha512_224)", - .cra_driver_name = "aspeed-hmac-sha512_224", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + - sizeof(struct aspeed_sha_hmac_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = aspeed_sham_cra_init, - .cra_exit = aspeed_sham_cra_exit, - } - } - }, - .alg.ahash.op = { - .do_one_request = aspeed_ahash_do_one, - }, - }, - { - .alg_base = "sha512_256", - .alg.ahash.base = { - .init = aspeed_sha512s_init, - .update = aspeed_sham_update, - .final = aspeed_sham_final, - .finup = aspeed_sham_finup, - .digest = aspeed_sham_digest, - .setkey = aspeed_sham_setkey, - .export = aspeed_sham_export, - .import = aspeed_sham_import, - .halg = { - .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct aspeed_sham_reqctx), - .base = { - .cra_name = "hmac(sha512_256)", - .cra_driver_name = "aspeed-hmac-sha512_256", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - 
.cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + - sizeof(struct aspeed_sha_hmac_ctx), - .cra_alignmask = 0, - .cra_module = THIS_MODULE, - .cra_init = aspeed_sham_cra_init, - .cra_exit = aspeed_sham_cra_exit, - } - } - }, - .alg.ahash.op = { - .do_one_request = aspeed_ahash_do_one, - }, - }, }; void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev) From 623814c0408771399c4209db73f60685f7cf1d14 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:01 +0800 Subject: [PATCH 135/149] crypto: caam - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 382 +++++++++++++++++++++++++-------- drivers/crypto/caam/caamhash.c | 28 +-- drivers/crypto/caam/caampkc.c | 20 +- drivers/crypto/caam/caampkc.h | 3 - 4 files changed, 320 insertions(+), 113 deletions(-) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index da8182ee86fe..eba2d750c3b0 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -57,11 +57,14 @@ #include "key_gen.h" #include "caamalg_desc.h" #include +#include #include +#include #include #include #include #include +#include #include #include #include @@ -95,13 +98,13 @@ struct caam_alg_entry { }; struct caam_aead_alg { - struct aead_alg aead; + struct aead_engine_alg aead; struct caam_alg_entry caam; bool registered; }; struct caam_skcipher_alg { - struct skcipher_alg skcipher; + struct skcipher_engine_alg skcipher; struct caam_alg_entry caam; bool registered; }; @@ -110,7 +113,6 @@ struct caam_skcipher_alg { * per-session context */ struct caam_ctx { - struct crypto_engine_ctx enginectx; u32 sh_desc_enc[DESC_MAX_USED_LEN]; u32 sh_desc_dec[DESC_MAX_USED_LEN]; u8 key[CAAM_MAX_KEY_SIZE]; @@ -188,7 +190,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) static int aead_set_sh_desc(struct crypto_aead *aead) { struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), - struct caam_aead_alg, aead); + struct caam_aead_alg, + aead.base); unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; @@ -738,7 +741,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_alg *alg = container_of(crypto_skcipher_alg(skcipher), typeof(*alg), - skcipher); + skcipher.base); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc; @@ -1195,7 +1198,8 @@ static void init_authenc_job(struct aead_request *req, { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), - struct caam_aead_alg, aead); + struct caam_aead_alg, + aead.base); unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); @@ -1881,7 +1885,7 @@ static int skcipher_decrypt(struct skcipher_request *req) static struct caam_skcipher_alg driver_algs[] = { { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-caam", @@ -1894,10 +1898,13 @@ static struct caam_skcipher_alg driver_algs[] = { .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam.class1_alg_type = 
OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, }, { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-3des-caam", @@ -1910,10 +1917,13 @@ static struct caam_skcipher_alg driver_algs[] = { .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, }, { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "cbc(des)", .cra_driver_name = "cbc-des-caam", @@ -1926,10 +1936,13 @@ static struct caam_skcipher_alg driver_algs[] = { .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, }, { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-caam", @@ -1943,11 +1956,14 @@ static struct caam_skcipher_alg driver_algs[] = { .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, }, { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "rfc3686-ctr-aes-caam", @@ -1963,6 +1979,9 @@ static struct caam_skcipher_alg driver_algs[] = { .ivsize = CTR_RFC3686_IV_SIZE, .chunksize = AES_BLOCK_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -1970,7 +1989,7 @@ static struct caam_skcipher_alg driver_algs[] = { }, }, { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-caam", @@ -1984,10 +2003,13 @@ static struct caam_skcipher_alg driver_algs[] = { .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, }, { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "ecb(des)", .cra_driver_name = "ecb-des-caam", @@ -1999,10 +2021,13 @@ static struct caam_skcipher_alg driver_algs[] = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB, }, { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-caam", @@ -2014,10 +2039,13 @@ static struct caam_skcipher_alg driver_algs[] = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB, }, { - .skcipher = { + .skcipher.base = { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3-caam", @@ -2029,13 +2057,16 @@ static struct caam_skcipher_alg driver_algs[] = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, }, + .skcipher.op = { + .do_one_request = skcipher_do_one_req, + }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, }, }; static struct caam_aead_alg driver_aeads[] = { { - .aead = { + .aead.base = { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aes-caam", @@ -2048,13 +2079,16 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = 
OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "rfc4543(gcm(aes))", .cra_driver_name = "rfc4543-gcm-aes-caam", @@ -2067,6 +2101,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = GCM_RFC4543_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, @@ -2074,7 +2111,7 @@ static struct caam_aead_alg driver_aeads[] = { }, /* Galois Counter Mode */ { - .aead = { + .aead.base = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-caam", @@ -2087,6 +2124,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, @@ -2094,7 +2134,7 @@ static struct caam_aead_alg driver_aeads[] = { }, /* single-pass ipsec_esp descriptor */ { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(md5)," "ecb(cipher_null))", @@ -2109,13 +2149,16 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = NULL_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha1)," "ecb(cipher_null))", @@ -2130,13 +2173,16 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = NULL_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha224)," "ecb(cipher_null))", @@ -2151,13 +2197,16 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = NULL_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha256)," "ecb(cipher_null))", @@ -2172,13 +2221,16 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = NULL_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha384)," "ecb(cipher_null))", @@ -2193,13 +2245,16 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = NULL_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha512)," "ecb(cipher_null))", @@ -2214,13 +2269,16 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = NULL_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-" @@ -2234,6 +2292,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { 
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | @@ -2241,7 +2302,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(aes)))", @@ -2256,6 +2317,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | @@ -2264,7 +2328,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-" @@ -2278,6 +2342,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | @@ -2285,7 +2352,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(aes)))", @@ -2300,6 +2367,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | @@ -2308,7 +2378,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "authenc-hmac-sha224-" @@ -2322,6 +2392,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | @@ -2329,7 +2402,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(aes)))", @@ -2344,6 +2417,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | @@ -2352,7 +2428,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-" @@ -2366,6 +2442,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | @@ -2373,7 +2452,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(aes)))", @@ -2388,6 +2467,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | @@ -2396,7 +2478,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + 
.aead.base = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "authenc-hmac-sha384-" @@ -2410,6 +2492,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | @@ -2417,7 +2502,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(aes)))", @@ -2432,6 +2517,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | @@ -2440,7 +2528,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "authenc-hmac-sha512-" @@ -2454,6 +2542,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | @@ -2461,7 +2552,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(aes)))", @@ -2476,6 +2567,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | @@ -2484,7 +2578,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-" @@ -2498,6 +2592,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | @@ -2505,7 +2602,7 @@ static struct caam_aead_alg driver_aeads[] = { } }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(des3_ede)))", @@ -2520,6 +2617,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | @@ -2528,7 +2628,7 @@ static struct caam_aead_alg driver_aeads[] = { } }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha1)," "cbc(des3_ede))", @@ -2543,6 +2643,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | @@ -2550,7 +2653,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(des3_ede)))", @@ -2566,6 +2669,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = 
DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | @@ -2574,7 +2680,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha224)," "cbc(des3_ede))", @@ -2589,6 +2695,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | @@ -2596,7 +2705,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(des3_ede)))", @@ -2612,6 +2721,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | @@ -2620,7 +2732,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha256)," "cbc(des3_ede))", @@ -2635,6 +2747,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | @@ -2642,7 +2757,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des3_ede)))", @@ -2658,6 +2773,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | @@ -2666,7 +2784,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha384)," "cbc(des3_ede))", @@ -2681,6 +2799,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | @@ -2688,7 +2809,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(des3_ede)))", @@ -2704,6 +2825,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | @@ -2712,7 +2836,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha512)," "cbc(des3_ede))", @@ -2727,6 +2851,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = 
OP_ALG_ALGSEL_SHA512 | @@ -2734,7 +2861,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(des3_ede)))", @@ -2750,6 +2877,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | @@ -2758,7 +2888,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(md5),cbc(des))", .cra_driver_name = "authenc-hmac-md5-" @@ -2772,6 +2902,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | @@ -2779,7 +2912,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(des)))", @@ -2794,6 +2927,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | @@ -2802,7 +2938,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "authenc-hmac-sha1-" @@ -2816,6 +2952,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | @@ -2823,7 +2962,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(des)))", @@ -2838,6 +2977,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | @@ -2846,7 +2988,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha224),cbc(des))", .cra_driver_name = "authenc-hmac-sha224-" @@ -2860,6 +3002,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | @@ -2867,7 +3012,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(des)))", @@ -2882,6 +3027,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | @@ -2890,7 +3038,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha256),cbc(des))", 
.cra_driver_name = "authenc-hmac-sha256-" @@ -2904,6 +3052,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | @@ -2911,7 +3062,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des)))", @@ -2926,6 +3077,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | @@ -2934,7 +3088,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha384),cbc(des))", .cra_driver_name = "authenc-hmac-sha384-" @@ -2948,6 +3102,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | @@ -2955,7 +3112,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(des)))", @@ -2970,6 +3127,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | @@ -2978,7 +3138,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha512),cbc(des))", .cra_driver_name = "authenc-hmac-sha512-" @@ -2992,6 +3152,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | @@ -2999,7 +3162,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(des)))", @@ -3014,6 +3177,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | @@ -3022,7 +3188,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(md5)," "rfc3686(ctr(aes)))", @@ -3037,6 +3203,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3046,7 +3215,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "seqiv(authenc(" "hmac(md5),rfc3686(ctr(aes))))", @@ -3061,6 +3230,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = 
aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3071,7 +3243,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha1)," "rfc3686(ctr(aes)))", @@ -3086,6 +3258,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3095,7 +3270,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "seqiv(authenc(" "hmac(sha1),rfc3686(ctr(aes))))", @@ -3110,6 +3285,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3120,7 +3298,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha224)," "rfc3686(ctr(aes)))", @@ -3135,6 +3313,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3144,7 +3325,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "seqiv(authenc(" "hmac(sha224),rfc3686(ctr(aes))))", @@ -3159,6 +3340,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3169,7 +3353,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha256)," "rfc3686(ctr(aes)))", @@ -3184,6 +3368,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3193,7 +3380,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "seqiv(authenc(hmac(sha256)," "rfc3686(ctr(aes))))", @@ -3208,6 +3395,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3218,7 +3408,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha384)," "rfc3686(ctr(aes)))", @@ -3233,6 +3423,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3242,7 +3435,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "seqiv(authenc(hmac(sha384)," "rfc3686(ctr(aes))))", @@ -3257,6 +3450,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = 
aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3267,7 +3463,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "authenc(hmac(sha512)," "rfc3686(ctr(aes)))", @@ -3282,6 +3478,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3291,7 +3490,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "seqiv(authenc(hmac(sha512)," "rfc3686(ctr(aes))))", @@ -3306,6 +3505,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, @@ -3316,7 +3518,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "rfc7539(chacha20,poly1305)", .cra_driver_name = "rfc7539-chacha20-poly1305-" @@ -3330,6 +3532,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = CHACHAPOLY_IV_SIZE, .maxauthsize = POLY1305_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD, @@ -3339,7 +3544,7 @@ static struct caam_aead_alg driver_aeads[] = { }, }, { - .aead = { + .aead.base = { .base = { .cra_name = "rfc7539esp(chacha20,poly1305)", .cra_driver_name = "rfc7539esp-chacha20-" @@ -3353,6 +3558,9 @@ static struct caam_aead_alg driver_aeads[] = { .ivsize = 8, .maxauthsize = POLY1305_DIGEST_SIZE, }, + .aead.op = { + .do_one_request = aead_do_one_req, + }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD, @@ -3412,13 +3620,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = - container_of(alg, typeof(*caam_alg), skcipher); + container_of(alg, typeof(*caam_alg), skcipher.base); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; int ret = 0; - ctx->enginectx.op.do_one_request = skcipher_do_one_req; - if (alg_aai == OP_ALG_AAI_XTS) { const char *tfm_name = crypto_tfm_alg_name(&tfm->base); struct crypto_skcipher *fallback; @@ -3449,13 +3655,11 @@ static int caam_aead_init(struct crypto_aead *tfm) { struct aead_alg *alg = crypto_aead_alg(tfm); struct caam_aead_alg *caam_alg = - container_of(alg, struct caam_aead_alg, aead); + container_of(alg, struct caam_aead_alg, aead.base); struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm); crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx)); - ctx->enginectx.op.do_one_request = aead_do_one_req; - return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); } @@ -3490,20 +3694,20 @@ void caam_algapi_exit(void) struct caam_aead_alg *t_alg = driver_aeads + i; if (t_alg->registered) - crypto_unregister_aead(&t_alg->aead); + crypto_engine_unregister_aead(&t_alg->aead); } for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { struct caam_skcipher_alg *t_alg = driver_algs + i; if (t_alg->registered) - crypto_unregister_skcipher(&t_alg->skcipher); + crypto_engine_unregister_skcipher(&t_alg->skcipher); } } static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) { - struct skcipher_alg *alg = &t_alg->skcipher; 
+ struct skcipher_alg *alg = &t_alg->skcipher.base; alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; @@ -3517,7 +3721,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) static void caam_aead_alg_init(struct caam_aead_alg *t_alg) { - struct aead_alg *alg = &t_alg->aead; + struct aead_alg *alg = &t_alg->aead.base; alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; @@ -3607,10 +3811,10 @@ int caam_algapi_init(struct device *ctrldev) caam_skcipher_alg_init(t_alg); - err = crypto_register_skcipher(&t_alg->skcipher); + err = crypto_engine_register_skcipher(&t_alg->skcipher); if (err) { pr_warn("%s alg registration failed\n", - t_alg->skcipher.base.cra_driver_name); + t_alg->skcipher.base.base.cra_driver_name); continue; } @@ -3654,15 +3858,15 @@ int caam_algapi_init(struct device *ctrldev) * if MD or MD size is not supported by device. */ if (is_mdha(c2_alg_sel) && - (!md_inst || t_alg->aead.maxauthsize > md_limit)) + (!md_inst || t_alg->aead.base.maxauthsize > md_limit)) continue; caam_aead_alg_init(t_alg); - err = crypto_register_aead(&t_alg->aead); + err = crypto_engine_register_aead(&t_alg->aead); if (err) { pr_warn("%s alg registration failed\n", - t_alg->aead.base.cra_driver_name); + t_alg->aead.base.base.cra_driver_name); continue; } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 9ef25387f6b6..290c8500c247 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -66,8 +66,12 @@ #include "key_gen.h" #include "caamhash_desc.h" #include +#include #include +#include #include +#include +#include #define CAAM_CRA_PRIORITY 3000 @@ -89,7 +93,6 @@ static struct list_head hash_list; /* ahash per-session context */ struct caam_hash_ctx { - struct crypto_engine_ctx enginectx; u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; @@ -1750,7 +1753,7 @@ static struct caam_hash_template driver_hash[] = { struct caam_hash_alg { struct list_head entry; int alg_type; - struct ahash_alg ahash_alg; + struct ahash_engine_alg ahash_alg; }; static int caam_hash_cra_init(struct crypto_tfm *tfm) @@ -1762,7 +1765,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) struct ahash_alg *alg = container_of(halg, struct ahash_alg, halg); struct caam_hash_alg *caam_hash = - container_of(alg, struct caam_hash_alg, ahash_alg); + container_of(alg, struct caam_hash_alg, ahash_alg.base); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, @@ -1853,8 +1856,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) sh_desc_digest) - sh_desc_update_offset; - ctx->enginectx.op.do_one_request = ahash_do_one_req; - crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state)); /* @@ -1887,7 +1888,7 @@ void caam_algapi_hash_exit(void) return; list_for_each_entry_safe(t_alg, n, &hash_list, entry) { - crypto_unregister_ahash(&t_alg->ahash_alg); + crypto_engine_unregister_ahash(&t_alg->ahash_alg); list_del(&t_alg->entry); kfree(t_alg); } @@ -1905,8 +1906,8 @@ caam_hash_alloc(struct caam_hash_template *template, if (!t_alg) return ERR_PTR(-ENOMEM); - t_alg->ahash_alg = template->template_ahash; - halg = &t_alg->ahash_alg; + t_alg->ahash_alg.base = template->template_ahash; + halg = 
&t_alg->ahash_alg.base; alg = &halg->halg.base; if (keyed) { @@ -1919,7 +1920,7 @@ caam_hash_alloc(struct caam_hash_template *template, template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); - t_alg->ahash_alg.setkey = NULL; + halg->setkey = NULL; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; @@ -1931,6 +1932,7 @@ caam_hash_alloc(struct caam_hash_template *template, alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; + t_alg->ahash_alg.op.do_one_request = ahash_do_one_req; return t_alg; } @@ -1992,10 +1994,10 @@ int caam_algapi_hash_init(struct device *ctrldev) continue; } - err = crypto_register_ahash(&t_alg->ahash_alg); + err = crypto_engine_register_ahash(&t_alg->ahash_alg); if (err) { pr_warn("%s alg registration failed: %d\n", - t_alg->ahash_alg.halg.base.cra_driver_name, + t_alg->ahash_alg.base.halg.base.cra_driver_name, err); kfree(t_alg); } else @@ -2012,10 +2014,10 @@ int caam_algapi_hash_init(struct device *ctrldev) continue; } - err = crypto_register_ahash(&t_alg->ahash_alg); + err = crypto_engine_register_ahash(&t_alg->ahash_alg); if (err) { pr_warn("%s alg registration failed: %d\n", - t_alg->ahash_alg.halg.base.cra_driver_name, + t_alg->ahash_alg.base.halg.base.cra_driver_name, err); kfree(t_alg); } else diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 1809c97d04be..887a5f2fb927 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -18,7 +18,10 @@ #include "caampkc.h" #include #include +#include #include +#include +#include #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ @@ -39,7 +42,7 @@ static u8 *zero_buffer; static bool init_done; struct caam_akcipher_alg { - struct akcipher_alg akcipher; + struct akcipher_engine_alg akcipher; bool registered; }; @@ -1124,8 +1127,6 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) return -ENOMEM; } - ctx->enginectx.op.do_one_request = akcipher_do_one_req; - return 0; } @@ -1142,7 +1143,7 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm) } static struct caam_akcipher_alg caam_rsa = { - .akcipher = { + .akcipher.base = { .encrypt = caam_rsa_enc, .decrypt = caam_rsa_dec, .set_pub_key = caam_rsa_set_pub_key, @@ -1158,7 +1159,10 @@ static struct caam_akcipher_alg caam_rsa = { .cra_ctxsize = sizeof(struct caam_rsa_ctx) + CRYPTO_DMA_PADDING, }, - } + }, + .akcipher.op = { + .do_one_request = akcipher_do_one_req, + }, }; /* Public Key Cryptography module initialization handler */ @@ -1196,12 +1200,12 @@ int caam_pkc_init(struct device *ctrldev) if (!zero_buffer) return -ENOMEM; - err = crypto_register_akcipher(&caam_rsa.akcipher); + err = crypto_engine_register_akcipher(&caam_rsa.akcipher); if (err) { kfree(zero_buffer); dev_warn(ctrldev, "%s alg registration failed\n", - caam_rsa.akcipher.base.cra_driver_name); + caam_rsa.akcipher.base.base.cra_driver_name); } else { init_done = true; caam_rsa.registered = true; @@ -1217,7 +1221,7 @@ void caam_pkc_exit(void) return; if (caam_rsa.registered) - crypto_unregister_akcipher(&caam_rsa.akcipher); + crypto_engine_unregister_akcipher(&caam_rsa.akcipher); kfree(zero_buffer); } diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h index cc889a525e2f..96d03704c9be 100644 --- a/drivers/crypto/caam/caampkc.h +++ b/drivers/crypto/caam/caampkc.h @@ -12,7 +12,6 @@ #define _PKC_DESC_H_ #include "compat.h" #include "pdb.h" 
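For reference, the caam, sl3516 and keembay conversions in this series all follow the same mechanical pattern: drop the crypto_engine_ctx that each driver embedded in its per-tfm context, move the do_one_request callback into the new *_engine_alg wrapper, and switch to the crypto_engine_register_*() helpers. A minimal before/after sketch for an skcipher is shown below; my_ctx, my_alg, my_engine_alg, my_init_tfm and my_do_one_request are illustrative names only and do not appear in any of the drivers touched here.

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

static int my_do_one_request(struct crypto_engine *engine, void *areq);

/* Before: the engine callback lived in the per-tfm context (old API). */
struct my_ctx {
	struct crypto_engine_ctx enginectx;	/* kept at the start of the context */
	/* ... driver state ... */
};

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.do_one_request = my_do_one_request;
	return 0;
}

static struct skcipher_alg my_alg = {
	.base.cra_name	= "cbc(aes)",
	/* ... remaining skcipher_alg fields ... */
	.init		= my_init_tfm,
};
/* registered with crypto_register_skcipher(&my_alg) */

/* After: the callback is carried by the algorithm object itself. */
static struct skcipher_engine_alg my_engine_alg = {
	.base = {
		.base.cra_name	= "cbc(aes)",
		/* ... remaining skcipher_alg fields ... */
		.init		= my_init_tfm,	/* no longer touches the engine */
	},
	.op.do_one_request = my_do_one_request,
};
/* registered with crypto_engine_register_skcipher(&my_engine_alg) */

The knock-on changes visible throughout the diffs follow from the extra level of nesting: container_of() lookups gain a ".base" (for example container_of(alg, typeof(*caam_alg), skcipher.base)), and cra_driver_name is reached via skcipher.base.base.cra_driver_name.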
-#include /** * caam_priv_key_form - CAAM RSA private key representation @@ -88,13 +87,11 @@ struct caam_rsa_key { /** * caam_rsa_ctx - per session context. - * @enginectx : crypto engine context * @key : RSA key in DMA zone * @dev : device structure * @padding_dma : dma address of padding, for adding it to the input */ struct caam_rsa_ctx { - struct crypto_engine_ctx enginectx; struct caam_rsa_key key; struct device *dev; dma_addr_t padding_dma; From 67b7702c5b03b89ca196c5bd137407509b397049 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:04 +0800 Subject: [PATCH 136/149] crypto: sl3516 - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. Signed-off-by: Herbert Xu --- drivers/crypto/gemini/sl3516-ce-cipher.c | 20 +++++----- drivers/crypto/gemini/sl3516-ce-core.c | 49 +++++++++++++++--------- drivers/crypto/gemini/sl3516-ce.h | 8 +--- 3 files changed, 43 insertions(+), 34 deletions(-) diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c index 0232f847785a..49dce9e0a834 100644 --- a/drivers/crypto/gemini/sl3516-ce-cipher.c +++ b/drivers/crypto/gemini/sl3516-ce-cipher.c @@ -8,13 +8,17 @@ * ECB mode. */ -#include +#include +#include +#include #include #include +#include #include +#include #include -#include -#include +#include +#include #include "sl3516-ce.h" /* sl3516_ce_need_fallback - check if a request can be handled by the CE */ @@ -105,7 +109,7 @@ static int sl3516_ce_cipher_fallback(struct skcipher_request *areq) struct sl3516_ce_alg_template *algt; int err; - algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher); + algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base); algt->stat_fb++; skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); @@ -136,7 +140,7 @@ static int sl3516_ce_cipher(struct skcipher_request *areq) int err = 0; int i; - algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher); + algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base); dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, crypto_tfm_alg_name(areq->base.tfm), @@ -258,7 +262,7 @@ theend: return err; } -static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq) +int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq) { int err; struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); @@ -318,7 +322,7 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm) memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx)); - algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher); + algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base); op->ce = algt->ce; op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); @@ -335,8 +339,6 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm) crypto_tfm_alg_driver_name(&sktfm->base), crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); - op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request; - err = pm_runtime_get_sync(op->ce->dev); if (err < 0) goto error_pm; diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c index b7524b649068..0fd47ab9df5c 100644 --- a/drivers/crypto/gemini/sl3516-ce-core.c +++ b/drivers/crypto/gemini/sl3516-ce-core.c @@ -6,22 +6,25 @@ * * Core file which registers crypto algorithms supported by the CryptoEngine */ + +#include +#include 
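Note that sl3516 additionally drops the static from sl3516_ce_handle_cipher_request and declares it in sl3516-ce.h, because the callback is now referenced from the ce_algs[] table in sl3516-ce-core.c rather than assigned in the tfm init path. The handler contract itself is unchanged by this series; as orientation only, a rough sketch of the usual shape follows (my_tfm_ctx, my_encrypt and the hardware step are illustrative, error handling simplified).

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

struct my_tfm_ctx {
	struct crypto_engine *engine;	/* illustrative: engine picked at init time */
};

/* .encrypt/.decrypt entry point: hand the request to the engine queue. */
static int my_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	return crypto_transfer_skcipher_request_to_engine(ctx->engine, req);
}

/* Called back by the engine, one request at a time. */
static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	int err = 0;

	/* ... program the hardware and wait for completion, setting err ... */

	/* Report completion back through the engine. */
	crypto_finalize_skcipher_request(engine, req, err);
	return 0;
}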
+#include #include -#include #include #include #include +#include #include #include #include +#include #include #include #include #include #include #include -#include -#include #include "sl3516-ce.h" @@ -217,7 +220,7 @@ static struct sl3516_ce_alg_template ce_algs[] = { { .type = CRYPTO_ALG_TYPE_SKCIPHER, .mode = ECB_AES, - .alg.skcipher = { + .alg.skcipher.base = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-sl3516", @@ -236,11 +239,13 @@ static struct sl3516_ce_alg_template ce_algs[] = { .setkey = sl3516_ce_aes_setkey, .encrypt = sl3516_ce_skencrypt, .decrypt = sl3516_ce_skdecrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = sl3516_ce_handle_cipher_request, + }, }, }; -#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v) { struct sl3516_ce_dev *ce = seq->private; @@ -264,8 +269,8 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v) switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", - ce_algs[i].alg.skcipher.base.cra_driver_name, - ce_algs[i].alg.skcipher.base.cra_name, + ce_algs[i].alg.skcipher.base.base.cra_driver_name, + ce_algs[i].alg.skcipher.base.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_fb); break; } @@ -274,7 +279,6 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs); -#endif static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce) { @@ -286,11 +290,11 @@ static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce) switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: dev_info(ce->dev, "DEBUG: Register %s\n", - ce_algs[i].alg.skcipher.base.cra_name); - err = crypto_register_skcipher(&ce_algs[i].alg.skcipher); + ce_algs[i].alg.skcipher.base.base.cra_name); + err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher); if (err) { dev_err(ce->dev, "Fail to register %s\n", - ce_algs[i].alg.skcipher.base.cra_name); + ce_algs[i].alg.skcipher.base.base.cra_name); ce_algs[i].ce = NULL; return err; } @@ -313,8 +317,8 @@ static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce) switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: dev_info(ce->dev, "Unregister %d %s\n", i, - ce_algs[i].alg.skcipher.base.cra_name); - crypto_unregister_skcipher(&ce_algs[i].alg.skcipher); + ce_algs[i].alg.skcipher.base.base.cra_name); + crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher); break; } } @@ -473,13 +477,20 @@ static int sl3516_ce_probe(struct platform_device *pdev) pm_runtime_put_sync(ce->dev); + if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) { + struct dentry *dbgfs_dir __maybe_unused; + struct dentry *dbgfs_stats __maybe_unused; + + /* Ignore error of debugfs */ + dbgfs_dir = debugfs_create_dir("sl3516", NULL); + dbgfs_stats = debugfs_create_file("stats", 0444, + dbgfs_dir, ce, + &sl3516_ce_debugfs_fops); #ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG - /* Ignore error of debugfs */ - ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL); - ce->dbgfs_stats = debugfs_create_file("stats", 0444, - ce->dbgfs_dir, ce, - &sl3516_ce_debugfs_fops); + ce->dbgfs_dir = dbgfs_dir; + ce->dbgfs_stats = dbgfs_stats; #endif + } return 0; error_pmuse: diff --git a/drivers/crypto/gemini/sl3516-ce.h b/drivers/crypto/gemini/sl3516-ce.h index 4c0ec6c920d1..9e1a7e7f8961 100644 --- a/drivers/crypto/gemini/sl3516-ce.h +++ b/drivers/crypto/gemini/sl3516-ce.h @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -292,16 +291,12 @@ struct sl3516_ce_cipher_req_ctx { /* * 
struct sl3516_ce_cipher_tfm_ctx - context for a skcipher TFM - * @enginectx: crypto_engine used by this TFM * @key: pointer to key data * @keylen: len of the key * @ce: pointer to the private data of driver handling this TFM * @fallback_tfm: pointer to the fallback TFM - * - * enginectx must be the first element */ struct sl3516_ce_cipher_tfm_ctx { - struct crypto_engine_ctx enginectx; u32 *key; u32 keylen; struct sl3516_ce_dev *ce; @@ -324,7 +319,7 @@ struct sl3516_ce_alg_template { u32 mode; struct sl3516_ce_dev *ce; union { - struct skcipher_alg skcipher; + struct skcipher_engine_alg skcipher; } alg; unsigned long stat_req; unsigned long stat_fb; @@ -345,3 +340,4 @@ int sl3516_ce_run_task(struct sl3516_ce_dev *ce, int sl3516_ce_rng_register(struct sl3516_ce_dev *ce); void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce); +int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq); From 530d7b009d8cd66a009f13090a7fff1764b3936c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:06 +0800 Subject: [PATCH 137/149] crypto: keembay - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. Signed-off-by: Herbert Xu --- .../intel/keembay/keembay-ocs-aes-core.c | 427 +++++++++--------- .../crypto/intel/keembay/keembay-ocs-ecc.c | 71 ++- .../intel/keembay/keembay-ocs-hcu-core.c | 232 +++++----- 3 files changed, 360 insertions(+), 370 deletions(-) diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c index f94f48289683..1e2fd9a754ec 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c @@ -5,24 +5,23 @@ * Copyright (C) 2018-2020 Intel Corporation */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - #include #include #include -#include - #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "ocs-aes.h" @@ -38,7 +37,6 @@ /** * struct ocs_aes_tctx - OCS AES Transform context - * @engine_ctx: Engine context. * @aes_dev: The OCS AES device. * @key: AES/SM4 key. * @key_len: The length (in bytes) of @key. @@ -47,7 +45,6 @@ * @use_fallback: Whether or not fallback cipher should be used. 
*/ struct ocs_aes_tctx { - struct crypto_engine_ctx engine_ctx; struct ocs_aes_dev *aes_dev; u8 key[OCS_AES_KEYSIZE_256]; unsigned int key_len; @@ -1148,13 +1145,6 @@ static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req) return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM); } -static inline int ocs_common_init(struct ocs_aes_tctx *tctx) -{ - tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request; - - return 0; -} - static int ocs_aes_init_tfm(struct crypto_skcipher *tfm) { const char *alg_name = crypto_tfm_alg_name(&tfm->base); @@ -1170,16 +1160,14 @@ static int ocs_aes_init_tfm(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx)); - return ocs_common_init(tctx); + return 0; } static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm) { - struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm); - crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx)); - return ocs_common_init(tctx); + return 0; } static inline void clear_key(struct ocs_aes_tctx *tctx) @@ -1204,13 +1192,6 @@ static void ocs_exit_tfm(struct crypto_skcipher *tfm) } } -static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx) -{ - tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request; - - return 0; -} - static int ocs_aes_aead_cra_init(struct crypto_aead *tfm) { const char *alg_name = crypto_tfm_alg_name(&tfm->base); @@ -1229,7 +1210,7 @@ static int ocs_aes_aead_cra_init(struct crypto_aead *tfm) (sizeof(struct aead_request) + crypto_aead_reqsize(tctx->sw_cipher.aead)))); - return ocs_common_aead_init(tctx); + return 0; } static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm, @@ -1257,11 +1238,9 @@ static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm, static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm) { - struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm); - crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx)); - return ocs_common_aead_init(tctx); + return 0; } static void ocs_aead_cra_exit(struct crypto_aead *tfm) @@ -1276,182 +1255,190 @@ static void ocs_aead_cra_exit(struct crypto_aead *tfm) } } -static struct skcipher_alg algs[] = { +static struct skcipher_engine_alg algs[] = { #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB { - .base.cra_name = "ecb(aes)", - .base.cra_driver_name = "ecb-aes-keembay-ocs", - .base.cra_priority = KMB_OCS_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ocs_aes_tctx), - .base.cra_module = THIS_MODULE, - .base.cra_alignmask = 0, + .base.base.cra_name = "ecb(aes)", + .base.base.cra_driver_name = "ecb-aes-keembay-ocs", + .base.base.cra_priority = KMB_OCS_PRIORITY, + .base.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.base.cra_blocksize = AES_BLOCK_SIZE, + .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx), + .base.base.cra_module = THIS_MODULE, + .base.base.cra_alignmask = 0, - .min_keysize = OCS_AES_MIN_KEY_SIZE, - .max_keysize = OCS_AES_MAX_KEY_SIZE, - .setkey = kmb_ocs_aes_set_key, - .encrypt = kmb_ocs_aes_ecb_encrypt, - .decrypt = kmb_ocs_aes_ecb_decrypt, - .init = ocs_aes_init_tfm, - .exit = ocs_exit_tfm, + .base.min_keysize = OCS_AES_MIN_KEY_SIZE, + .base.max_keysize = OCS_AES_MAX_KEY_SIZE, + .base.setkey = kmb_ocs_aes_set_key, + .base.encrypt = kmb_ocs_aes_ecb_encrypt, + .base.decrypt = kmb_ocs_aes_ecb_decrypt, + .base.init = ocs_aes_init_tfm, + .base.exit = 
ocs_exit_tfm, + .op.do_one_request = kmb_ocs_aes_sk_do_one_request, }, #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */ { - .base.cra_name = "cbc(aes)", - .base.cra_driver_name = "cbc-aes-keembay-ocs", - .base.cra_priority = KMB_OCS_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ocs_aes_tctx), - .base.cra_module = THIS_MODULE, - .base.cra_alignmask = 0, + .base.base.cra_name = "cbc(aes)", + .base.base.cra_driver_name = "cbc-aes-keembay-ocs", + .base.base.cra_priority = KMB_OCS_PRIORITY, + .base.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.base.cra_blocksize = AES_BLOCK_SIZE, + .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx), + .base.base.cra_module = THIS_MODULE, + .base.base.cra_alignmask = 0, - .min_keysize = OCS_AES_MIN_KEY_SIZE, - .max_keysize = OCS_AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = kmb_ocs_aes_set_key, - .encrypt = kmb_ocs_aes_cbc_encrypt, - .decrypt = kmb_ocs_aes_cbc_decrypt, - .init = ocs_aes_init_tfm, - .exit = ocs_exit_tfm, + .base.min_keysize = OCS_AES_MIN_KEY_SIZE, + .base.max_keysize = OCS_AES_MAX_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.setkey = kmb_ocs_aes_set_key, + .base.encrypt = kmb_ocs_aes_cbc_encrypt, + .base.decrypt = kmb_ocs_aes_cbc_decrypt, + .base.init = ocs_aes_init_tfm, + .base.exit = ocs_exit_tfm, + .op.do_one_request = kmb_ocs_aes_sk_do_one_request, }, { - .base.cra_name = "ctr(aes)", - .base.cra_driver_name = "ctr-aes-keembay-ocs", - .base.cra_priority = KMB_OCS_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct ocs_aes_tctx), - .base.cra_module = THIS_MODULE, - .base.cra_alignmask = 0, + .base.base.cra_name = "ctr(aes)", + .base.base.cra_driver_name = "ctr-aes-keembay-ocs", + .base.base.cra_priority = KMB_OCS_PRIORITY, + .base.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.base.cra_blocksize = 1, + .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx), + .base.base.cra_module = THIS_MODULE, + .base.base.cra_alignmask = 0, - .min_keysize = OCS_AES_MIN_KEY_SIZE, - .max_keysize = OCS_AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = kmb_ocs_aes_set_key, - .encrypt = kmb_ocs_aes_ctr_encrypt, - .decrypt = kmb_ocs_aes_ctr_decrypt, - .init = ocs_aes_init_tfm, - .exit = ocs_exit_tfm, + .base.min_keysize = OCS_AES_MIN_KEY_SIZE, + .base.max_keysize = OCS_AES_MAX_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.setkey = kmb_ocs_aes_set_key, + .base.encrypt = kmb_ocs_aes_ctr_encrypt, + .base.decrypt = kmb_ocs_aes_ctr_decrypt, + .base.init = ocs_aes_init_tfm, + .base.exit = ocs_exit_tfm, + .op.do_one_request = kmb_ocs_aes_sk_do_one_request, }, #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS { - .base.cra_name = "cts(cbc(aes))", - .base.cra_driver_name = "cts-aes-keembay-ocs", - .base.cra_priority = KMB_OCS_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ocs_aes_tctx), - .base.cra_module = THIS_MODULE, - .base.cra_alignmask = 0, + .base.base.cra_name = "cts(cbc(aes))", + .base.base.cra_driver_name = "cts-aes-keembay-ocs", + .base.base.cra_priority = KMB_OCS_PRIORITY, + .base.base.cra_flags = CRYPTO_ALG_ASYNC | + 
CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.base.cra_blocksize = AES_BLOCK_SIZE, + .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx), + .base.base.cra_module = THIS_MODULE, + .base.base.cra_alignmask = 0, - .min_keysize = OCS_AES_MIN_KEY_SIZE, - .max_keysize = OCS_AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = kmb_ocs_aes_set_key, - .encrypt = kmb_ocs_aes_cts_encrypt, - .decrypt = kmb_ocs_aes_cts_decrypt, - .init = ocs_aes_init_tfm, - .exit = ocs_exit_tfm, + .base.min_keysize = OCS_AES_MIN_KEY_SIZE, + .base.max_keysize = OCS_AES_MAX_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.setkey = kmb_ocs_aes_set_key, + .base.encrypt = kmb_ocs_aes_cts_encrypt, + .base.decrypt = kmb_ocs_aes_cts_decrypt, + .base.init = ocs_aes_init_tfm, + .base.exit = ocs_exit_tfm, + .op.do_one_request = kmb_ocs_aes_sk_do_one_request, }, #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */ #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB { - .base.cra_name = "ecb(sm4)", - .base.cra_driver_name = "ecb-sm4-keembay-ocs", - .base.cra_priority = KMB_OCS_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ocs_aes_tctx), - .base.cra_module = THIS_MODULE, - .base.cra_alignmask = 0, + .base.base.cra_name = "ecb(sm4)", + .base.base.cra_driver_name = "ecb-sm4-keembay-ocs", + .base.base.cra_priority = KMB_OCS_PRIORITY, + .base.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.base.cra_blocksize = AES_BLOCK_SIZE, + .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx), + .base.base.cra_module = THIS_MODULE, + .base.base.cra_alignmask = 0, - .min_keysize = OCS_SM4_KEY_SIZE, - .max_keysize = OCS_SM4_KEY_SIZE, - .setkey = kmb_ocs_sm4_set_key, - .encrypt = kmb_ocs_sm4_ecb_encrypt, - .decrypt = kmb_ocs_sm4_ecb_decrypt, - .init = ocs_sm4_init_tfm, - .exit = ocs_exit_tfm, + .base.min_keysize = OCS_SM4_KEY_SIZE, + .base.max_keysize = OCS_SM4_KEY_SIZE, + .base.setkey = kmb_ocs_sm4_set_key, + .base.encrypt = kmb_ocs_sm4_ecb_encrypt, + .base.decrypt = kmb_ocs_sm4_ecb_decrypt, + .base.init = ocs_sm4_init_tfm, + .base.exit = ocs_exit_tfm, + .op.do_one_request = kmb_ocs_aes_sk_do_one_request, }, #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */ { - .base.cra_name = "cbc(sm4)", - .base.cra_driver_name = "cbc-sm4-keembay-ocs", - .base.cra_priority = KMB_OCS_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ocs_aes_tctx), - .base.cra_module = THIS_MODULE, - .base.cra_alignmask = 0, + .base.base.cra_name = "cbc(sm4)", + .base.base.cra_driver_name = "cbc-sm4-keembay-ocs", + .base.base.cra_priority = KMB_OCS_PRIORITY, + .base.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.base.cra_blocksize = AES_BLOCK_SIZE, + .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx), + .base.base.cra_module = THIS_MODULE, + .base.base.cra_alignmask = 0, - .min_keysize = OCS_SM4_KEY_SIZE, - .max_keysize = OCS_SM4_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = kmb_ocs_sm4_set_key, - .encrypt = kmb_ocs_sm4_cbc_encrypt, - .decrypt = kmb_ocs_sm4_cbc_decrypt, - .init = ocs_sm4_init_tfm, - .exit = ocs_exit_tfm, + .base.min_keysize = OCS_SM4_KEY_SIZE, + .base.max_keysize = OCS_SM4_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.setkey = kmb_ocs_sm4_set_key, + .base.encrypt = kmb_ocs_sm4_cbc_encrypt, + .base.decrypt = kmb_ocs_sm4_cbc_decrypt, + .base.init = 
ocs_sm4_init_tfm, + .base.exit = ocs_exit_tfm, + .op.do_one_request = kmb_ocs_aes_sk_do_one_request, }, { - .base.cra_name = "ctr(sm4)", - .base.cra_driver_name = "ctr-sm4-keembay-ocs", - .base.cra_priority = KMB_OCS_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct ocs_aes_tctx), - .base.cra_module = THIS_MODULE, - .base.cra_alignmask = 0, + .base.base.cra_name = "ctr(sm4)", + .base.base.cra_driver_name = "ctr-sm4-keembay-ocs", + .base.base.cra_priority = KMB_OCS_PRIORITY, + .base.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.base.cra_blocksize = 1, + .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx), + .base.base.cra_module = THIS_MODULE, + .base.base.cra_alignmask = 0, - .min_keysize = OCS_SM4_KEY_SIZE, - .max_keysize = OCS_SM4_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = kmb_ocs_sm4_set_key, - .encrypt = kmb_ocs_sm4_ctr_encrypt, - .decrypt = kmb_ocs_sm4_ctr_decrypt, - .init = ocs_sm4_init_tfm, - .exit = ocs_exit_tfm, + .base.min_keysize = OCS_SM4_KEY_SIZE, + .base.max_keysize = OCS_SM4_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.setkey = kmb_ocs_sm4_set_key, + .base.encrypt = kmb_ocs_sm4_ctr_encrypt, + .base.decrypt = kmb_ocs_sm4_ctr_decrypt, + .base.init = ocs_sm4_init_tfm, + .base.exit = ocs_exit_tfm, + .op.do_one_request = kmb_ocs_aes_sk_do_one_request, }, #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS { - .base.cra_name = "cts(cbc(sm4))", - .base.cra_driver_name = "cts-sm4-keembay-ocs", - .base.cra_priority = KMB_OCS_PRIORITY, - .base.cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct ocs_aes_tctx), - .base.cra_module = THIS_MODULE, - .base.cra_alignmask = 0, + .base.base.cra_name = "cts(cbc(sm4))", + .base.base.cra_driver_name = "cts-sm4-keembay-ocs", + .base.base.cra_priority = KMB_OCS_PRIORITY, + .base.base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .base.base.cra_blocksize = AES_BLOCK_SIZE, + .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx), + .base.base.cra_module = THIS_MODULE, + .base.base.cra_alignmask = 0, - .min_keysize = OCS_SM4_KEY_SIZE, - .max_keysize = OCS_SM4_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = kmb_ocs_sm4_set_key, - .encrypt = kmb_ocs_sm4_cts_encrypt, - .decrypt = kmb_ocs_sm4_cts_decrypt, - .init = ocs_sm4_init_tfm, - .exit = ocs_exit_tfm, + .base.min_keysize = OCS_SM4_KEY_SIZE, + .base.max_keysize = OCS_SM4_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.setkey = kmb_ocs_sm4_set_key, + .base.encrypt = kmb_ocs_sm4_cts_encrypt, + .base.decrypt = kmb_ocs_sm4_cts_decrypt, + .base.init = ocs_sm4_init_tfm, + .base.exit = ocs_exit_tfm, + .op.do_one_request = kmb_ocs_aes_sk_do_one_request, } #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */ }; -static struct aead_alg algs_aead[] = { +static struct aead_engine_alg algs_aead[] = { { - .base = { + .base.base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-keembay-ocs", .cra_priority = KMB_OCS_PRIORITY, @@ -1463,17 +1450,18 @@ static struct aead_alg algs_aead[] = { .cra_alignmask = 0, .cra_module = THIS_MODULE, }, - .init = ocs_aes_aead_cra_init, - .exit = ocs_aead_cra_exit, - .ivsize = GCM_AES_IV_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setauthsize = kmb_ocs_aead_gcm_setauthsize, - .setkey = kmb_ocs_aes_aead_set_key, - .encrypt = kmb_ocs_aes_gcm_encrypt, - .decrypt = kmb_ocs_aes_gcm_decrypt, + .base.init = ocs_aes_aead_cra_init, + .base.exit = 
ocs_aead_cra_exit, + .base.ivsize = GCM_AES_IV_SIZE, + .base.maxauthsize = AES_BLOCK_SIZE, + .base.setauthsize = kmb_ocs_aead_gcm_setauthsize, + .base.setkey = kmb_ocs_aes_aead_set_key, + .base.encrypt = kmb_ocs_aes_gcm_encrypt, + .base.decrypt = kmb_ocs_aes_gcm_decrypt, + .op.do_one_request = kmb_ocs_aes_aead_do_one_request, }, { - .base = { + .base.base = { .cra_name = "ccm(aes)", .cra_driver_name = "ccm-aes-keembay-ocs", .cra_priority = KMB_OCS_PRIORITY, @@ -1485,17 +1473,18 @@ static struct aead_alg algs_aead[] = { .cra_alignmask = 0, .cra_module = THIS_MODULE, }, - .init = ocs_aes_aead_cra_init, - .exit = ocs_aead_cra_exit, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setauthsize = kmb_ocs_aead_ccm_setauthsize, - .setkey = kmb_ocs_aes_aead_set_key, - .encrypt = kmb_ocs_aes_ccm_encrypt, - .decrypt = kmb_ocs_aes_ccm_decrypt, + .base.init = ocs_aes_aead_cra_init, + .base.exit = ocs_aead_cra_exit, + .base.ivsize = AES_BLOCK_SIZE, + .base.maxauthsize = AES_BLOCK_SIZE, + .base.setauthsize = kmb_ocs_aead_ccm_setauthsize, + .base.setkey = kmb_ocs_aes_aead_set_key, + .base.encrypt = kmb_ocs_aes_ccm_encrypt, + .base.decrypt = kmb_ocs_aes_ccm_decrypt, + .op.do_one_request = kmb_ocs_aes_aead_do_one_request, }, { - .base = { + .base.base = { .cra_name = "gcm(sm4)", .cra_driver_name = "gcm-sm4-keembay-ocs", .cra_priority = KMB_OCS_PRIORITY, @@ -1506,17 +1495,18 @@ static struct aead_alg algs_aead[] = { .cra_alignmask = 0, .cra_module = THIS_MODULE, }, - .init = ocs_sm4_aead_cra_init, - .exit = ocs_aead_cra_exit, - .ivsize = GCM_AES_IV_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setauthsize = kmb_ocs_aead_gcm_setauthsize, - .setkey = kmb_ocs_sm4_aead_set_key, - .encrypt = kmb_ocs_sm4_gcm_encrypt, - .decrypt = kmb_ocs_sm4_gcm_decrypt, + .base.init = ocs_sm4_aead_cra_init, + .base.exit = ocs_aead_cra_exit, + .base.ivsize = GCM_AES_IV_SIZE, + .base.maxauthsize = AES_BLOCK_SIZE, + .base.setauthsize = kmb_ocs_aead_gcm_setauthsize, + .base.setkey = kmb_ocs_sm4_aead_set_key, + .base.encrypt = kmb_ocs_sm4_gcm_encrypt, + .base.decrypt = kmb_ocs_sm4_gcm_decrypt, + .op.do_one_request = kmb_ocs_aes_aead_do_one_request, }, { - .base = { + .base.base = { .cra_name = "ccm(sm4)", .cra_driver_name = "ccm-sm4-keembay-ocs", .cra_priority = KMB_OCS_PRIORITY, @@ -1527,21 +1517,22 @@ static struct aead_alg algs_aead[] = { .cra_alignmask = 0, .cra_module = THIS_MODULE, }, - .init = ocs_sm4_aead_cra_init, - .exit = ocs_aead_cra_exit, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setauthsize = kmb_ocs_aead_ccm_setauthsize, - .setkey = kmb_ocs_sm4_aead_set_key, - .encrypt = kmb_ocs_sm4_ccm_encrypt, - .decrypt = kmb_ocs_sm4_ccm_decrypt, + .base.init = ocs_sm4_aead_cra_init, + .base.exit = ocs_aead_cra_exit, + .base.ivsize = AES_BLOCK_SIZE, + .base.maxauthsize = AES_BLOCK_SIZE, + .base.setauthsize = kmb_ocs_aead_ccm_setauthsize, + .base.setkey = kmb_ocs_sm4_aead_set_key, + .base.encrypt = kmb_ocs_sm4_ccm_encrypt, + .base.decrypt = kmb_ocs_sm4_ccm_decrypt, + .op.do_one_request = kmb_ocs_aes_aead_do_one_request, } }; static void unregister_aes_algs(struct ocs_aes_dev *aes_dev) { - crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead)); - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); + crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead)); + crypto_engine_unregister_skciphers(algs, ARRAY_SIZE(algs)); } static int register_aes_algs(struct ocs_aes_dev *aes_dev) @@ -1552,13 +1543,13 @@ static int register_aes_algs(struct ocs_aes_dev *aes_dev) * If any algorithm fails to 
register, all preceding algorithms that * were successfully registered will be automatically unregistered. */ - ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead)); + ret = crypto_engine_register_aeads(algs_aead, ARRAY_SIZE(algs_aead)); if (ret) return ret; - ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs)); + ret = crypto_engine_register_skciphers(algs, ARRAY_SIZE(algs)); if (ret) - crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs)); + crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs)); return ret; } diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c index e91e570b7ae0..fb95deed9057 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c @@ -7,30 +7,27 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include +#include #include #include #include #include #include +#include #include #include #include #include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include +#include #define DRV_NAME "keembay-ocs-ecc" @@ -95,13 +92,11 @@ struct ocs_ecc_dev { /** * struct ocs_ecc_ctx - Transformation context. - * @engine_ctx: Crypto engine ctx. * @ecc_dev: The ECC driver associated with this context. * @curve: The elliptic curve used by this transformation. * @private_key: The private key. */ struct ocs_ecc_ctx { - struct crypto_engine_ctx engine_ctx; struct ocs_ecc_dev *ecc_dev; const struct ecc_curve *curve; u64 private_key[KMB_ECC_VLI_MAX_DIGITS]; @@ -794,8 +789,6 @@ static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id) if (!tctx->curve) return -EOPNOTSUPP; - tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request; - return 0; } @@ -828,36 +821,38 @@ static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm) return digits_to_bytes(tctx->curve->g.ndigits) * 2; } -static struct kpp_alg ocs_ecdh_p256 = { - .set_secret = kmb_ocs_ecdh_set_secret, - .generate_public_key = kmb_ocs_ecdh_generate_public_key, - .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret, - .init = kmb_ocs_ecdh_nist_p256_init_tfm, - .exit = kmb_ocs_ecdh_exit_tfm, - .max_size = kmb_ocs_ecdh_max_size, - .base = { +static struct kpp_engine_alg ocs_ecdh_p256 = { + .base.set_secret = kmb_ocs_ecdh_set_secret, + .base.generate_public_key = kmb_ocs_ecdh_generate_public_key, + .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret, + .base.init = kmb_ocs_ecdh_nist_p256_init_tfm, + .base.exit = kmb_ocs_ecdh_exit_tfm, + .base.max_size = kmb_ocs_ecdh_max_size, + .base.base = { .cra_name = "ecdh-nist-p256", .cra_driver_name = "ecdh-nist-p256-keembay-ocs", .cra_priority = KMB_OCS_ECC_PRIORITY, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct ocs_ecc_ctx), }, + .op.do_one_request = kmb_ocs_ecc_do_one_request, }; -static struct kpp_alg ocs_ecdh_p384 = { - .set_secret = kmb_ocs_ecdh_set_secret, - .generate_public_key = kmb_ocs_ecdh_generate_public_key, - .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret, - .init = kmb_ocs_ecdh_nist_p384_init_tfm, - .exit = kmb_ocs_ecdh_exit_tfm, - .max_size = kmb_ocs_ecdh_max_size, - .base = { +static struct kpp_engine_alg ocs_ecdh_p384 = { + .base.set_secret = kmb_ocs_ecdh_set_secret, + .base.generate_public_key = kmb_ocs_ecdh_generate_public_key, + .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret, + .base.init = kmb_ocs_ecdh_nist_p384_init_tfm, + 
.base.exit = kmb_ocs_ecdh_exit_tfm, + .base.max_size = kmb_ocs_ecdh_max_size, + .base.base = { .cra_name = "ecdh-nist-p384", .cra_driver_name = "ecdh-nist-p384-keembay-ocs", .cra_priority = KMB_OCS_ECC_PRIORITY, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct ocs_ecc_ctx), }, + .op.do_one_request = kmb_ocs_ecc_do_one_request, }; static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id) @@ -939,14 +934,14 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev) } /* Register the KPP algo. */ - rc = crypto_register_kpp(&ocs_ecdh_p256); + rc = crypto_engine_register_kpp(&ocs_ecdh_p256); if (rc) { dev_err(dev, "Could not register OCS algorithms with Crypto API\n"); goto cleanup; } - rc = crypto_register_kpp(&ocs_ecdh_p384); + rc = crypto_engine_register_kpp(&ocs_ecdh_p384); if (rc) { dev_err(dev, "Could not register OCS algorithms with Crypto API\n"); @@ -956,7 +951,7 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev) return 0; ocs_ecdh_p384_error: - crypto_unregister_kpp(&ocs_ecdh_p256); + crypto_engine_unregister_kpp(&ocs_ecdh_p256); cleanup: crypto_engine_exit(ecc_dev->engine); @@ -975,8 +970,8 @@ static int kmb_ocs_ecc_remove(struct platform_device *pdev) ecc_dev = platform_get_drvdata(pdev); - crypto_unregister_kpp(&ocs_ecdh_p384); - crypto_unregister_kpp(&ocs_ecdh_p256); + crypto_engine_unregister_kpp(&ocs_ecdh_p384); + crypto_engine_unregister_kpp(&ocs_ecdh_p256); spin_lock(&ocs_ecc.lock); list_del(&ecc_dev->list); diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c index 51a6de6294cb..57a20281ead8 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c @@ -5,19 +5,20 @@ * Copyright (C) 2018-2020 Intel Corporation */ -#include -#include -#include -#include -#include -#include - #include +#include +#include #include #include #include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include #include "ocs-hcu.h" @@ -34,7 +35,6 @@ /** * struct ocs_hcu_ctx: OCS HCU Transform context. - * @engine_ctx: Crypto Engine context. * @hcu_dev: The OCS HCU device used by the transformation. * @key: The key (used only for HMAC transformations). * @key_len: The length of the key. @@ -42,7 +42,6 @@ * @is_hmac_tfm: Whether or not this is a HMAC transformation. */ struct ocs_hcu_ctx { - struct crypto_engine_ctx engine_ctx; struct ocs_hcu_dev *hcu_dev; u8 key[SHA512_BLOCK_SIZE]; size_t key_len; @@ -824,11 +823,6 @@ static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx) { crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), sizeof(struct ocs_hcu_rctx)); - - /* Init context to 0. */ - memzero_explicit(ctx, sizeof(*ctx)); - /* Set engine ops. 
*/ - ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request; } static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm) @@ -883,17 +877,17 @@ static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm) memzero_explicit(ctx->key, sizeof(ctx->key)); } -static struct ahash_alg ocs_hcu_algs[] = { +static struct ahash_engine_alg ocs_hcu_algs[] = { #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -907,18 +901,19 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_module = THIS_MODULE, .cra_init = kmb_ocs_hcu_sha_cra_init, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .setkey = kmb_ocs_hcu_setkey, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.setkey = kmb_ocs_hcu_setkey, + .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -933,18 +928,19 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_init = kmb_ocs_hcu_hmac_cra_init, .cra_exit = kmb_ocs_hcu_hmac_cra_exit, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */ { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -958,18 +954,19 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_module = THIS_MODULE, .cra_init = kmb_ocs_hcu_sha_cra_init, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .setkey = kmb_ocs_hcu_setkey, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.setkey = kmb_ocs_hcu_setkey, + .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -984,17 
+981,18 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_init = kmb_ocs_hcu_hmac_cra_init, .cra_exit = kmb_ocs_hcu_hmac_cra_exit, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.halg = { .digestsize = SM3_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -1008,18 +1006,19 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_module = THIS_MODULE, .cra_init = kmb_ocs_hcu_sm3_cra_init, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .setkey = kmb_ocs_hcu_setkey, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.setkey = kmb_ocs_hcu_setkey, + .base.halg = { .digestsize = SM3_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -1034,17 +1033,18 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_init = kmb_ocs_hcu_hmac_sm3_cra_init, .cra_exit = kmb_ocs_hcu_hmac_cra_exit, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -1058,18 +1058,19 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_module = THIS_MODULE, .cra_init = kmb_ocs_hcu_sha_cra_init, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .setkey = kmb_ocs_hcu_setkey, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.setkey = kmb_ocs_hcu_setkey, + .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -1084,17 +1085,18 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_init = kmb_ocs_hcu_hmac_cra_init, .cra_exit = kmb_ocs_hcu_hmac_cra_exit, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final 
= kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -1108,18 +1110,19 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_module = THIS_MODULE, .cra_init = kmb_ocs_hcu_sha_cra_init, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, { - .init = kmb_ocs_hcu_init, - .update = kmb_ocs_hcu_update, - .final = kmb_ocs_hcu_final, - .finup = kmb_ocs_hcu_finup, - .digest = kmb_ocs_hcu_digest, - .export = kmb_ocs_hcu_export, - .import = kmb_ocs_hcu_import, - .setkey = kmb_ocs_hcu_setkey, - .halg = { + .base.init = kmb_ocs_hcu_init, + .base.update = kmb_ocs_hcu_update, + .base.final = kmb_ocs_hcu_final, + .base.finup = kmb_ocs_hcu_finup, + .base.digest = kmb_ocs_hcu_digest, + .base.export = kmb_ocs_hcu_export, + .base.import = kmb_ocs_hcu_import, + .base.setkey = kmb_ocs_hcu_setkey, + .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct ocs_hcu_rctx), .base = { @@ -1134,7 +1137,8 @@ static struct ahash_alg ocs_hcu_algs[] = { .cra_init = kmb_ocs_hcu_hmac_cra_init, .cra_exit = kmb_ocs_hcu_hmac_cra_exit, } - } + }, + .op.do_one_request = kmb_ocs_hcu_do_one_request, }, }; @@ -1155,7 +1159,7 @@ static int kmb_ocs_hcu_remove(struct platform_device *pdev) if (!hcu_dev) return -ENODEV; - crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs)); + crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs)); rc = crypto_engine_exit(hcu_dev->engine); @@ -1223,7 +1227,7 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev) /* Security infrastructure guarantees OCS clock is enabled. */ - rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs)); + rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs)); if (rc) { dev_err(dev, "Could not register algorithms.\n"); goto cleanup; From 03906fba750ed468b8ea3484afc650454ce45bcc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:08 +0800 Subject: [PATCH 138/149] crypto: omap - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. 
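The shape of the conversion is the same in every driver touched by this series: drop the crypto_engine_ctx embedded in the transform context and attach the request callback to the engine-alg wrapper that gets registered with the crypto_engine_register_*() helpers. The sketch below is illustrative only; foo_ctx, foo_alg and foo_do_one_request are placeholder names, not symbols from this patch.

	/* Old layout: the callback hangs off a crypto_engine_ctx embedded
	 * in the transform context and is assigned in the tfm init hook.
	 */
	struct foo_ctx {
		struct crypto_engine_ctx enginectx;	/* removed by this series */
		/* ... driver state ... */
	};
	/* in init_tfm(): ctx->enginectx.op.do_one_request = foo_do_one_request; */

	/* New layout: the callback lives next to the algorithm definition
	 * in the *_engine_alg wrapper, so it is set once per algorithm
	 * instead of once per transform.
	 */
	static struct skcipher_engine_alg foo_alg = {
		.base = {
			/* usual struct skcipher_alg fields */
		},
		.op.do_one_request	= foo_do_one_request,
	};
	/* registered with crypto_engine_register_skcipher(&foo_alg); */
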
Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes-gcm.c | 25 ++-- drivers/crypto/omap-aes.c | 210 ++++++++++++++------------ drivers/crypto/omap-aes.h | 15 +- drivers/crypto/omap-des.c | 181 +++++++++++----------- drivers/crypto/omap-sham.c | 274 +++++++++++++++++----------------- 5 files changed, 365 insertions(+), 340 deletions(-) diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c index d02363e976e7..c498950402e8 100644 --- a/drivers/crypto/omap-aes-gcm.c +++ b/drivers/crypto/omap-aes-gcm.c @@ -7,18 +7,21 @@ * Copyright (c) 2016 Texas Instruments Incorporated */ -#include -#include -#include -#include -#include -#include -#include #include +#include #include +#include #include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "omap-crypto.h" #include "omap-aes.h" @@ -354,7 +357,7 @@ int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent, return crypto_rfc4106_check_authsize(authsize); } -static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq) +int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq) { struct aead_request *req = container_of(areq, struct aead_request, base); @@ -379,10 +382,6 @@ static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq) int omap_aes_gcm_cra_init(struct crypto_aead *tfm) { - struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); - - ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req; - crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx)); return 0; diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index ad0d8db086db..ea1331218105 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "omap-crypto.h" #include "omap-aes.h" @@ -638,8 +639,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) + crypto_skcipher_reqsize(blk)); - ctx->enginectx.op.do_one_request = omap_aes_crypt_req; - return 0; } @@ -655,68 +654,77 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm) /* ********************** ALGS ************************************ */ -static struct skcipher_alg algs_ecb_cbc[] = { +static struct skcipher_engine_alg algs_ecb_cbc[] = { { - .base.cra_name = "ecb(aes)", - .base.cra_driver_name = "ecb-aes-omap", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct omap_aes_ctx), - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-omap", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct omap_aes_ctx), + .base.cra_module = THIS_MODULE, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = omap_aes_setkey, - .encrypt = omap_aes_ecb_encrypt, - .decrypt = omap_aes_ecb_decrypt, - .init = omap_aes_init_tfm, - .exit = omap_aes_exit_tfm, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = omap_aes_setkey, + .encrypt = omap_aes_ecb_encrypt, + .decrypt = omap_aes_ecb_decrypt, + .init = omap_aes_init_tfm, + .exit = omap_aes_exit_tfm, + }, + .op.do_one_request = omap_aes_crypt_req, }, { - .base.cra_name = "cbc(aes)", - 
.base.cra_driver_name = "cbc-aes-omap", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct omap_aes_ctx), - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-omap", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct omap_aes_ctx), + .base.cra_module = THIS_MODULE, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = omap_aes_setkey, - .encrypt = omap_aes_cbc_encrypt, - .decrypt = omap_aes_cbc_decrypt, - .init = omap_aes_init_tfm, - .exit = omap_aes_exit_tfm, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = omap_aes_setkey, + .encrypt = omap_aes_cbc_encrypt, + .decrypt = omap_aes_cbc_decrypt, + .init = omap_aes_init_tfm, + .exit = omap_aes_exit_tfm, + }, + .op.do_one_request = omap_aes_crypt_req, } }; -static struct skcipher_alg algs_ctr[] = { +static struct skcipher_engine_alg algs_ctr[] = { { - .base.cra_name = "ctr(aes)", - .base.cra_driver_name = "ctr-aes-omap", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct omap_aes_ctx), - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "ctr(aes)", + .base.cra_driver_name = "ctr-aes-omap", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct omap_aes_ctx), + .base.cra_module = THIS_MODULE, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = omap_aes_setkey, - .encrypt = omap_aes_ctr_encrypt, - .decrypt = omap_aes_ctr_decrypt, - .init = omap_aes_init_tfm, - .exit = omap_aes_exit_tfm, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = omap_aes_setkey, + .encrypt = omap_aes_ctr_encrypt, + .decrypt = omap_aes_ctr_decrypt, + .init = omap_aes_init_tfm, + .exit = omap_aes_exit_tfm, + }, + .op.do_one_request = omap_aes_crypt_req, } }; @@ -727,46 +735,52 @@ static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = { }, }; -static struct aead_alg algs_aead_gcm[] = { +static struct aead_engine_alg algs_aead_gcm[] = { { .base = { - .cra_name = "gcm(aes)", - .cra_driver_name = "gcm-aes-omap", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-omap", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, + .init = omap_aes_gcm_cra_init, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = omap_aes_gcm_setkey, + .setauthsize = omap_aes_gcm_setauthsize, + .encrypt = omap_aes_gcm_encrypt, + .decrypt = omap_aes_gcm_decrypt, }, - .init = omap_aes_gcm_cra_init, - .ivsize = 
GCM_AES_IV_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setkey = omap_aes_gcm_setkey, - .setauthsize = omap_aes_gcm_setauthsize, - .encrypt = omap_aes_gcm_encrypt, - .decrypt = omap_aes_gcm_decrypt, + .op.do_one_request = omap_aes_gcm_crypt_req, }, { .base = { - .cra_name = "rfc4106(gcm(aes))", - .cra_driver_name = "rfc4106-gcm-aes-omap", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_KERN_DRIVER_ONLY, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-omap", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, + .init = omap_aes_gcm_cra_init, + .maxauthsize = AES_BLOCK_SIZE, + .ivsize = GCM_RFC4106_IV_SIZE, + .setkey = omap_aes_4106gcm_setkey, + .setauthsize = omap_aes_4106gcm_setauthsize, + .encrypt = omap_aes_4106gcm_encrypt, + .decrypt = omap_aes_4106gcm_decrypt, }, - .init = omap_aes_gcm_cra_init, - .maxauthsize = AES_BLOCK_SIZE, - .ivsize = GCM_RFC4106_IV_SIZE, - .setkey = omap_aes_4106gcm_setkey, - .setauthsize = omap_aes_4106gcm_setauthsize, - .encrypt = omap_aes_4106gcm_encrypt, - .decrypt = omap_aes_4106gcm_decrypt, + .op.do_one_request = omap_aes_gcm_crypt_req, }, }; @@ -1088,8 +1102,8 @@ static int omap_aes_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct omap_aes_dev *dd; - struct skcipher_alg *algp; - struct aead_alg *aalg; + struct skcipher_engine_alg *algp; + struct aead_engine_alg *aalg; struct resource res; int err = -ENOMEM, i, j, irq = -1; u32 reg; @@ -1182,9 +1196,9 @@ static int omap_aes_probe(struct platform_device *pdev) for (j = 0; j < dd->pdata->algs_info[i].size; j++) { algp = &dd->pdata->algs_info[i].algs_list[j]; - pr_debug("reg alg: %s\n", algp->base.cra_name); + pr_debug("reg alg: %s\n", algp->base.base.cra_name); - err = crypto_register_skcipher(algp); + err = crypto_engine_register_skcipher(algp); if (err) goto err_algs; @@ -1198,9 +1212,9 @@ static int omap_aes_probe(struct platform_device *pdev) for (i = 0; i < dd->pdata->aead_algs_info->size; i++) { aalg = &dd->pdata->aead_algs_info->algs_list[i]; - pr_debug("reg alg: %s\n", aalg->base.cra_name); + pr_debug("reg alg: %s\n", aalg->base.base.cra_name); - err = crypto_register_aead(aalg); + err = crypto_engine_register_aead(aalg); if (err) goto err_aead_algs; @@ -1218,12 +1232,12 @@ static int omap_aes_probe(struct platform_device *pdev) err_aead_algs: for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) { aalg = &dd->pdata->aead_algs_info->algs_list[i]; - crypto_unregister_aead(aalg); + crypto_engine_unregister_aead(aalg); } err_algs: for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) - crypto_unregister_skcipher( + crypto_engine_unregister_skcipher( &dd->pdata->algs_info[i].algs_list[j]); err_engine: @@ -1245,7 +1259,7 @@ err_data: static int omap_aes_remove(struct platform_device *pdev) { struct omap_aes_dev *dd = platform_get_drvdata(pdev); - struct aead_alg *aalg; + struct aead_engine_alg *aalg; int i, j; spin_lock_bh(&list_lock); @@ -1254,14 +1268,14 @@ static int omap_aes_remove(struct platform_device *pdev) for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { - crypto_unregister_skcipher( + 
crypto_engine_unregister_skcipher( &dd->pdata->algs_info[i].algs_list[j]); dd->pdata->algs_info[i].registered--; } for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) { aalg = &dd->pdata->aead_algs_info->algs_list[i]; - crypto_unregister_aead(aalg); + crypto_engine_unregister_aead(aalg); dd->pdata->aead_algs_info->registered--; } diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h index 23d073e87bb8..0f35c9164764 100644 --- a/drivers/crypto/omap-aes.h +++ b/drivers/crypto/omap-aes.h @@ -10,7 +10,6 @@ #define __OMAP_AES_H__ #include -#include #define DST_MAXBURST 4 #define DMA_MIN (DST_MAXBURST * sizeof(u32)) @@ -93,7 +92,6 @@ struct omap_aes_gcm_result { }; struct omap_aes_ctx { - struct crypto_engine_ctx enginectx; int keylen; u32 key[AES_KEYSIZE_256 / sizeof(u32)]; u8 nonce[4]; @@ -117,15 +115,15 @@ struct omap_aes_reqctx { #define OMAP_AES_CACHE_SIZE 0 struct omap_aes_algs_info { - struct skcipher_alg *algs_list; - unsigned int size; - unsigned int registered; + struct skcipher_engine_alg *algs_list; + unsigned int size; + unsigned int registered; }; struct omap_aes_aead_algs { - struct aead_alg *algs_list; - unsigned int size; - unsigned int registered; + struct aead_engine_alg *algs_list; + unsigned int size; + unsigned int registered; }; struct omap_aes_pdata { @@ -218,5 +216,6 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd); int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd); void omap_aes_gcm_dma_out_callback(void *data); void omap_aes_clear_copy_flags(struct omap_aes_dev *dd); +int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq); #endif diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 29a3b4c9edaf..ae9e9e4eb94c 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -16,27 +16,25 @@ #define prx(num) do { } while (0) #endif -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include #include -#include +#include +#include +#include +#include +#include +#include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include "omap-crypto.h" @@ -83,7 +81,6 @@ #define FLAGS_OUT_DATA_ST_SHIFT 10 struct omap_des_ctx { - struct crypto_engine_ctx enginectx; struct omap_des_dev *dd; int keylen; @@ -99,9 +96,9 @@ struct omap_des_reqctx { #define OMAP_DES_CACHE_SIZE 0 struct omap_des_algs_info { - struct skcipher_alg *algs_list; - unsigned int size; - unsigned int registered; + struct skcipher_engine_alg *algs_list; + unsigned int size; + unsigned int registered; }; struct omap_des_pdata { @@ -707,89 +704,97 @@ static int omap_des_cbc_decrypt(struct skcipher_request *req) static int omap_des_init_tfm(struct crypto_skcipher *tfm) { - struct omap_des_ctx *ctx = crypto_skcipher_ctx(tfm); - pr_debug("enter\n"); crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_des_reqctx)); - ctx->enginectx.op.do_one_request = omap_des_crypt_req; - return 0; } /* ********************** ALGS ************************************ */ -static struct skcipher_alg algs_ecb_cbc[] = { +static struct skcipher_engine_alg algs_ecb_cbc[] = { { - .base.cra_name = "ecb(des)", - .base.cra_driver_name = "ecb-des-omap", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct omap_des_ctx), - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "ecb(des)", + 
.base.cra_driver_name = "ecb-des-omap", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct omap_des_ctx), + .base.cra_module = THIS_MODULE, - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = omap_des_setkey, - .encrypt = omap_des_ecb_encrypt, - .decrypt = omap_des_ecb_decrypt, - .init = omap_des_init_tfm, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = omap_des_setkey, + .encrypt = omap_des_ecb_encrypt, + .decrypt = omap_des_ecb_decrypt, + .init = omap_des_init_tfm, + }, + .op.do_one_request = omap_des_crypt_req, }, { - .base.cra_name = "cbc(des)", - .base.cra_driver_name = "cbc-des-omap", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct omap_des_ctx), - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "cbc(des)", + .base.cra_driver_name = "cbc-des-omap", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct omap_des_ctx), + .base.cra_module = THIS_MODULE, - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = omap_des_setkey, - .encrypt = omap_des_cbc_encrypt, - .decrypt = omap_des_cbc_decrypt, - .init = omap_des_init_tfm, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = omap_des_setkey, + .encrypt = omap_des_cbc_encrypt, + .decrypt = omap_des_cbc_decrypt, + .init = omap_des_init_tfm, + }, + .op.do_one_request = omap_des_crypt_req, }, { - .base.cra_name = "ecb(des3_ede)", - .base.cra_driver_name = "ecb-des3-omap", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct omap_des_ctx), - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "ecb-des3-omap", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct omap_des_ctx), + .base.cra_module = THIS_MODULE, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .setkey = omap_des3_setkey, - .encrypt = omap_des_ecb_encrypt, - .decrypt = omap_des_ecb_decrypt, - .init = omap_des_init_tfm, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = omap_des3_setkey, + .encrypt = omap_des_ecb_encrypt, + .decrypt = omap_des_ecb_decrypt, + .init = omap_des_init_tfm, + }, + .op.do_one_request = omap_des_crypt_req, }, { - .base.cra_name = "cbc(des3_ede)", - .base.cra_driver_name = "cbc-des3-omap", - .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct omap_des_ctx), - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "cbc-des3-omap", + .base.cra_priority = 300, + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct omap_des_ctx), + .base.cra_module = THIS_MODULE, - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = 
DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = omap_des3_setkey, - .encrypt = omap_des_cbc_encrypt, - .decrypt = omap_des_cbc_decrypt, - .init = omap_des_init_tfm, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = omap_des3_setkey, + .encrypt = omap_des_cbc_encrypt, + .decrypt = omap_des_cbc_decrypt, + .init = omap_des_init_tfm, + }, + .op.do_one_request = omap_des_crypt_req, } }; @@ -947,7 +952,7 @@ static int omap_des_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct omap_des_dev *dd; - struct skcipher_alg *algp; + struct skcipher_engine_alg *algp; struct resource *res; int err = -ENOMEM, i, j, irq = -1; u32 reg; @@ -1035,9 +1040,9 @@ static int omap_des_probe(struct platform_device *pdev) for (j = 0; j < dd->pdata->algs_info[i].size; j++) { algp = &dd->pdata->algs_info[i].algs_list[j]; - pr_debug("reg alg: %s\n", algp->base.cra_name); + pr_debug("reg alg: %s\n", algp->base.base.cra_name); - err = crypto_register_skcipher(algp); + err = crypto_engine_register_skcipher(algp); if (err) goto err_algs; @@ -1050,7 +1055,7 @@ static int omap_des_probe(struct platform_device *pdev) err_algs: for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) - crypto_unregister_skcipher( + crypto_engine_unregister_skcipher( &dd->pdata->algs_info[i].algs_list[j]); err_engine: @@ -1080,7 +1085,7 @@ static int omap_des_remove(struct platform_device *pdev) for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) - crypto_unregister_skcipher( + crypto_engine_unregister_skcipher( &dd->pdata->algs_info[i].algs_list[j]); tasklet_kill(&dd->done_task); diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 2ef92301969f..4b79d54fd671 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -13,34 +13,31 @@ #define pr_fmt(fmt) "%s: " fmt, __func__ +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include -#include +#include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #define MD5_DIGEST_SIZE 16 @@ -168,7 +165,6 @@ struct omap_sham_hmac_ctx { }; struct omap_sham_ctx { - struct crypto_engine_ctx enginectx; unsigned long flags; /* fallback stuff */ @@ -180,7 +176,7 @@ struct omap_sham_ctx { #define OMAP_SHAM_QUEUE_LENGTH 10 struct omap_sham_algs_info { - struct ahash_alg *algs_list; + struct ahash_engine_alg *algs_list; unsigned int size; unsigned int registered; }; @@ -1353,8 +1349,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) } - tctx->enginectx.op.do_one_request = omap_sham_hash_one_req; - return 0; } @@ -1425,15 +1419,15 @@ static int omap_sham_import(struct ahash_request *req, const void *in) return 0; } -static struct ahash_alg algs_sha1_md5[] = { +static struct ahash_engine_alg algs_sha1_md5[] = { { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .halg.digestsize = SHA1_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + 
.base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.halg.digestsize = SHA1_DIGEST_SIZE, + .base.halg.base = { .cra_name = "sha1", .cra_driver_name = "omap-sha1", .cra_priority = 400, @@ -1446,16 +1440,17 @@ static struct ahash_alg algs_sha1_md5[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .halg.digestsize = MD5_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.halg.digestsize = MD5_DIGEST_SIZE, + .base.halg.base = { .cra_name = "md5", .cra_driver_name = "omap-md5", .cra_priority = 400, @@ -1468,17 +1463,18 @@ static struct ahash_alg algs_sha1_md5[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .setkey = omap_sham_setkey, - .halg.digestsize = SHA1_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.setkey = omap_sham_setkey, + .base.halg.digestsize = SHA1_DIGEST_SIZE, + .base.halg.base = { .cra_name = "hmac(sha1)", .cra_driver_name = "omap-hmac-sha1", .cra_priority = 400, @@ -1492,17 +1488,18 @@ static struct ahash_alg algs_sha1_md5[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha1_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .setkey = omap_sham_setkey, - .halg.digestsize = MD5_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.setkey = omap_sham_setkey, + .base.halg.digestsize = MD5_DIGEST_SIZE, + .base.halg.base = { .cra_name = "hmac(md5)", .cra_driver_name = "omap-hmac-md5", .cra_priority = 400, @@ -1516,20 +1513,21 @@ static struct ahash_alg algs_sha1_md5[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_md5_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, } }; /* OMAP4 has some algs in addition to what OMAP2 has */ -static struct ahash_alg algs_sha224_sha256[] = { +static struct ahash_engine_alg algs_sha224_sha256[] = { { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .halg.digestsize = SHA224_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.halg.digestsize = SHA224_DIGEST_SIZE, + .base.halg.base = { .cra_name = "sha224", .cra_driver_name = "omap-sha224", .cra_priority = 400, @@ -1542,16 +1540,17 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_module = THIS_MODULE, .cra_init = 
omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .halg.digestsize = SHA256_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.halg.digestsize = SHA256_DIGEST_SIZE, + .base.halg.base = { .cra_name = "sha256", .cra_driver_name = "omap-sha256", .cra_priority = 400, @@ -1564,17 +1563,18 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .setkey = omap_sham_setkey, - .halg.digestsize = SHA224_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.setkey = omap_sham_setkey, + .base.halg.digestsize = SHA224_DIGEST_SIZE, + .base.halg.base = { .cra_name = "hmac(sha224)", .cra_driver_name = "omap-hmac-sha224", .cra_priority = 400, @@ -1588,17 +1588,18 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha224_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .setkey = omap_sham_setkey, - .halg.digestsize = SHA256_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.setkey = omap_sham_setkey, + .base.halg.digestsize = SHA256_DIGEST_SIZE, + .base.halg.base = { .cra_name = "hmac(sha256)", .cra_driver_name = "omap-hmac-sha256", .cra_priority = 400, @@ -1612,19 +1613,20 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha256_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, }; -static struct ahash_alg algs_sha384_sha512[] = { +static struct ahash_engine_alg algs_sha384_sha512[] = { { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.halg.digestsize = SHA384_DIGEST_SIZE, + .base.halg.base = { .cra_name = "sha384", .cra_driver_name = "omap-sha384", .cra_priority = 400, @@ -1637,16 +1639,17 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.base = { + .base.init = 
omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.halg.digestsize = SHA512_DIGEST_SIZE, + .base.halg.base = { .cra_name = "sha512", .cra_driver_name = "omap-sha512", .cra_priority = 400, @@ -1659,17 +1662,18 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .setkey = omap_sham_setkey, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.setkey = omap_sham_setkey, + .base.halg.digestsize = SHA384_DIGEST_SIZE, + .base.halg.base = { .cra_name = "hmac(sha384)", .cra_driver_name = "omap-hmac-sha384", .cra_priority = 400, @@ -1683,17 +1687,18 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha384_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, { - .init = omap_sham_init, - .update = omap_sham_update, - .final = omap_sham_final, - .finup = omap_sham_finup, - .digest = omap_sham_digest, - .setkey = omap_sham_setkey, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.base = { + .base.init = omap_sham_init, + .base.update = omap_sham_update, + .base.final = omap_sham_final, + .base.finup = omap_sham_finup, + .base.digest = omap_sham_digest, + .base.setkey = omap_sham_setkey, + .base.halg.digestsize = SHA512_DIGEST_SIZE, + .base.halg.base = { .cra_name = "hmac(sha512)", .cra_driver_name = "omap-hmac-sha512", .cra_priority = 400, @@ -1707,7 +1712,8 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha512_init, .cra_exit = omap_sham_cra_exit, - } + }, + .op.do_one_request = omap_sham_hash_one_req, }, }; @@ -2148,14 +2154,16 @@ static int omap_sham_probe(struct platform_device *pdev) break; for (j = 0; j < dd->pdata->algs_info[i].size; j++) { + struct ahash_engine_alg *ealg; struct ahash_alg *alg; - alg = &dd->pdata->algs_info[i].algs_list[j]; + ealg = &dd->pdata->algs_info[i].algs_list[j]; + alg = &ealg->base; alg->export = omap_sham_export; alg->import = omap_sham_import; alg->halg.statesize = sizeof(struct omap_sham_reqctx) + BUFLEN; - err = crypto_register_ahash(alg); + err = crypto_engine_register_ahash(ealg); if (err) goto err_algs; @@ -2174,7 +2182,7 @@ static int omap_sham_probe(struct platform_device *pdev) err_algs: for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) - crypto_unregister_ahash( + crypto_engine_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); err_engine_start: crypto_engine_exit(dd->engine); @@ -2205,7 +2213,7 @@ static int omap_sham_remove(struct platform_device *pdev) spin_unlock_bh(&sham.lock); for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { - crypto_unregister_ahash( + crypto_engine_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); dd->pdata->algs_info[i].registered--; } From 1a15d26c3131b3210b7299d3864aa75b71687a16 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:10 +0800 
Subject: [PATCH 139/149] crypto: rk3288 - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. Signed-off-by: Herbert Xu --- drivers/crypto/rockchip/rk3288_crypto.c | 58 +++++++++------- drivers/crypto/rockchip/rk3288_crypto.h | 21 +++--- drivers/crypto/rockchip/rk3288_crypto_ahash.c | 67 +++++++++++-------- .../crypto/rockchip/rk3288_crypto_skcipher.c | 60 +++++++++++------ 4 files changed, 123 insertions(+), 83 deletions(-) diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 9f6ba770a90a..937826c50470 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -10,14 +10,22 @@ */ #include "rk3288_crypto.h" +#include +#include +#include +#include #include +#include +#include +#include +#include +#include #include #include #include #include -#include -#include #include +#include static struct rockchip_ip rocklist = { .dev_list = LIST_HEAD_INIT(rocklist.dev_list), @@ -184,7 +192,6 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = { &rk_ahash_md5, }; -#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) { struct rk_crypto_info *dd; @@ -204,8 +211,8 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) switch (rk_cipher_algs[i]->type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", - rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name, - rk_cipher_algs[i]->alg.skcipher.base.cra_name, + rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name, + rk_cipher_algs[i]->alg.skcipher.base.base.cra_name, rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb); seq_printf(seq, "\tfallback due to length: %lu\n", rk_cipher_algs[i]->stat_fb_len); @@ -216,8 +223,8 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) break; case CRYPTO_ALG_TYPE_AHASH: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", - rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name, - rk_cipher_algs[i]->alg.hash.halg.base.cra_name, + rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name, + rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name, rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb); break; } @@ -226,17 +233,20 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) } DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs); -#endif static void register_debugfs(struct rk_crypto_info *crypto_info) { -#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG + struct dentry *dbgfs_dir __maybe_unused; + struct dentry *dbgfs_stats __maybe_unused; + /* Ignore error of debugfs */ - rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); - rocklist.dbgfs_stats = debugfs_create_file("stats", 0444, - rocklist.dbgfs_dir, - &rocklist, - &rk_crypto_debugfs_fops); + dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); + dbgfs_stats = debugfs_create_file("stats", 0444, dbgfs_dir, &rocklist, + &rk_crypto_debugfs_fops); + +#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG + rocklist.dbgfs_dir = dbgfs_dir; + rocklist.dbgfs_stats = dbgfs_stats; #endif } @@ -250,15 +260,15 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info) switch (rk_cipher_algs[i]->type) { case CRYPTO_ALG_TYPE_SKCIPHER: dev_info(crypto_info->dev, "Register %s as %s\n", - rk_cipher_algs[i]->alg.skcipher.base.cra_name, - rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name); - err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher); + 
rk_cipher_algs[i]->alg.skcipher.base.base.cra_name, + rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name); + err = crypto_engine_register_skcipher(&rk_cipher_algs[i]->alg.skcipher); break; case CRYPTO_ALG_TYPE_AHASH: dev_info(crypto_info->dev, "Register %s as %s\n", - rk_cipher_algs[i]->alg.hash.halg.base.cra_name, - rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name); - err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash); + rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name, + rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name); + err = crypto_engine_register_ahash(&rk_cipher_algs[i]->alg.hash); break; default: dev_err(crypto_info->dev, "unknown algorithm\n"); @@ -271,9 +281,9 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info) err_cipher_algs: for (k = 0; k < i; k++) { if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER) - crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher); + crypto_engine_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher); else - crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash); + crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash); } return err; } @@ -284,9 +294,9 @@ static void rk_crypto_unregister(void) for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER) - crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher); + crypto_engine_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher); else - crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash); + crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash); } } diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index b2695258cade..3aa03cbfb6be 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -3,21 +3,18 @@ #define __RK3288_CRYPTO_H__ #include -#include -#include -#include -#include -#include -#include -#include -#include #include +#include #include #include - #include #include #include +#include +#include +#include +#include +#include #define _SBF(v, f) ((v) << (f)) @@ -231,7 +228,6 @@ struct rk_crypto_info { /* the private variable of hash */ struct rk_ahash_ctx { - struct crypto_engine_ctx enginectx; /* for fallback */ struct crypto_ahash *fallback_tfm; }; @@ -246,7 +242,6 @@ struct rk_ahash_rctx { /* the private variable of cipher */ struct rk_cipher_ctx { - struct crypto_engine_ctx enginectx; unsigned int keylen; u8 key[AES_MAX_KEY_SIZE]; u8 iv[AES_BLOCK_SIZE]; @@ -264,8 +259,8 @@ struct rk_crypto_tmp { u32 type; struct rk_crypto_info *dev; union { - struct skcipher_alg skcipher; - struct ahash_alg hash; + struct skcipher_engine_alg skcipher; + struct ahash_engine_alg hash; } alg; unsigned long stat_req; unsigned long stat_fb; diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 1519aa0a0f7c..8c143180645e 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -8,9 +8,15 @@ * * Some ideas are from marvell/cesa.c and s5p-sss.c driver. 
*/ -#include + #include +#include +#include +#include #include +#include +#include +#include #include "rk3288_crypto.h" /* @@ -40,8 +46,8 @@ static int rk_ahash_digest_fb(struct ahash_request *areq) struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); - struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base); algt->stat_fb++; @@ -254,8 +260,8 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq) struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); - struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); - struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base); struct scatterlist *sg = areq->src; struct rk_crypto_info *rkc = rctx->dev; int err; @@ -335,12 +341,12 @@ theend: return 0; } -static int rk_cra_hash_init(struct crypto_tfm *tfm) +static int rk_hash_init_tfm(struct crypto_ahash *tfm) { - struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); - const char *alg_name = crypto_tfm_alg_name(tfm); - struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); - struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + const char *alg_name = crypto_ahash_alg_name(tfm); + struct ahash_alg *alg = crypto_ahash_alg(tfm); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base); /* for fallback */ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, @@ -350,25 +356,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) return PTR_ERR(tctx->fallback_tfm); } - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + crypto_ahash_set_reqsize(tfm, sizeof(struct rk_ahash_rctx) + crypto_ahash_reqsize(tctx->fallback_tfm)); - tctx->enginectx.op.do_one_request = rk_hash_run; - return 0; } -static void rk_cra_hash_exit(struct crypto_tfm *tfm) +static void rk_hash_exit_tfm(struct crypto_ahash *tfm) { - struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); crypto_free_ahash(tctx->fallback_tfm); } struct rk_crypto_tmp rk_ahash_sha1 = { .type = CRYPTO_ALG_TYPE_AHASH, - .alg.hash = { + .alg.hash.base = { .init = rk_ahash_init, .update = rk_ahash_update, .final = rk_ahash_final, @@ -376,6 +380,8 @@ struct rk_crypto_tmp rk_ahash_sha1 = { .export = rk_ahash_export, .import = rk_ahash_import, .digest = rk_ahash_digest, + .init_tfm = rk_hash_init_tfm, + .exit_tfm = rk_hash_exit_tfm, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), @@ -388,17 +394,18 @@ struct rk_crypto_tmp rk_ahash_sha1 = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct rk_ahash_ctx), .cra_alignmask = 3, - .cra_init = rk_cra_hash_init, - .cra_exit = rk_cra_hash_exit, .cra_module = THIS_MODULE, } } - } + }, + .alg.hash.op = { + .do_one_request = rk_hash_run, + }, }; struct rk_crypto_tmp rk_ahash_sha256 = { .type = CRYPTO_ALG_TYPE_AHASH, - .alg.hash = { + .alg.hash.base = { .init = rk_ahash_init, .update = 
rk_ahash_update, .final = rk_ahash_final, @@ -406,6 +413,8 @@ struct rk_crypto_tmp rk_ahash_sha256 = { .export = rk_ahash_export, .import = rk_ahash_import, .digest = rk_ahash_digest, + .init_tfm = rk_hash_init_tfm, + .exit_tfm = rk_hash_exit_tfm, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), @@ -418,17 +427,18 @@ struct rk_crypto_tmp rk_ahash_sha256 = { .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct rk_ahash_ctx), .cra_alignmask = 3, - .cra_init = rk_cra_hash_init, - .cra_exit = rk_cra_hash_exit, .cra_module = THIS_MODULE, } } - } + }, + .alg.hash.op = { + .do_one_request = rk_hash_run, + }, }; struct rk_crypto_tmp rk_ahash_md5 = { .type = CRYPTO_ALG_TYPE_AHASH, - .alg.hash = { + .alg.hash.base = { .init = rk_ahash_init, .update = rk_ahash_update, .final = rk_ahash_final, @@ -436,6 +446,8 @@ struct rk_crypto_tmp rk_ahash_md5 = { .export = rk_ahash_export, .import = rk_ahash_import, .digest = rk_ahash_digest, + .init_tfm = rk_hash_init_tfm, + .exit_tfm = rk_hash_exit_tfm, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct md5_state), @@ -448,10 +460,11 @@ struct rk_crypto_tmp rk_ahash_md5 = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct rk_ahash_ctx), .cra_alignmask = 3, - .cra_init = rk_cra_hash_init, - .cra_exit = rk_cra_hash_exit, .cra_module = THIS_MODULE, } } - } + }, + .alg.hash.op = { + .do_one_request = rk_hash_run, + }, }; diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 59069457582b..da95747d973f 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -8,8 +8,14 @@ * * Some ideas are from marvell-cesa.c and s5p-sss.c driver. 
*/ -#include + +#include +#include #include +#include +#include +#include +#include #include "rk3288_crypto.h" #define RK_CRYPTO_DEC BIT(0) @@ -18,7 +24,7 @@ static int rk_cipher_need_fallback(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base); struct scatterlist *sgs, *sgd; unsigned int stodo, dtodo, len; unsigned int bs = crypto_skcipher_blocksize(tfm); @@ -65,7 +71,7 @@ static int rk_cipher_fallback(struct skcipher_request *areq) struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base); int err; algt->stat_fb++; @@ -305,7 +311,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req) unsigned int len = areq->cryptlen; unsigned int todo; struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base); struct rk_crypto_info *rkc = rctx->dev; err = pm_runtime_resume_and_get(rkc->dev); @@ -430,7 +436,7 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); const char *name = crypto_tfm_alg_name(&tfm->base); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); - struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); + struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base); ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback_tfm)) { @@ -442,8 +448,6 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) tfm->reqsize = sizeof(struct rk_cipher_rctx) + crypto_skcipher_reqsize(ctx->fallback_tfm); - ctx->enginectx.op.do_one_request = rk_cipher_run; - return 0; } @@ -457,7 +461,7 @@ static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) struct rk_crypto_tmp rk_ecb_aes_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, - .alg.skcipher = { + .alg.skcipher.base = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-rk", .base.cra_priority = 300, @@ -474,12 +478,15 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { .setkey = rk_aes_setkey, .encrypt = rk_aes_ecb_encrypt, .decrypt = rk_aes_ecb_decrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = rk_cipher_run, + }, }; struct rk_crypto_tmp rk_cbc_aes_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, - .alg.skcipher = { + .alg.skcipher.base = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-rk", .base.cra_priority = 300, @@ -497,12 +504,15 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { .setkey = rk_aes_setkey, .encrypt = rk_aes_cbc_encrypt, .decrypt = rk_aes_cbc_decrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = rk_cipher_run, + }, }; struct rk_crypto_tmp rk_ecb_des_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, - .alg.skcipher = { + .alg.skcipher.base = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-rk", .base.cra_priority = 300, @@ -519,12 +529,15 @@ struct rk_crypto_tmp rk_ecb_des_alg = { .setkey = rk_des_setkey, .encrypt 
= rk_des_ecb_encrypt, .decrypt = rk_des_ecb_decrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = rk_cipher_run, + }, }; struct rk_crypto_tmp rk_cbc_des_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, - .alg.skcipher = { + .alg.skcipher.base = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-rk", .base.cra_priority = 300, @@ -542,12 +555,15 @@ struct rk_crypto_tmp rk_cbc_des_alg = { .setkey = rk_des_setkey, .encrypt = rk_des_cbc_encrypt, .decrypt = rk_des_cbc_decrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = rk_cipher_run, + }, }; struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, - .alg.skcipher = { + .alg.skcipher.base = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3-ede-rk", .base.cra_priority = 300, @@ -564,12 +580,15 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .setkey = rk_tdes_setkey, .encrypt = rk_des3_ede_ecb_encrypt, .decrypt = rk_des3_ede_ecb_decrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = rk_cipher_run, + }, }; struct rk_crypto_tmp rk_cbc_des3_ede_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, - .alg.skcipher = { + .alg.skcipher.base = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3-ede-rk", .base.cra_priority = 300, @@ -587,5 +606,8 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = { .setkey = rk_tdes_setkey, .encrypt = rk_des3_ede_cbc_encrypt, .decrypt = rk_des3_ede_cbc_decrypt, - } + }, + .alg.skcipher.op = { + .do_one_request = rk_cipher_run, + }, }; From 982213e4730484ad080070a20f4a4a8deb0fa688 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:12 +0800 Subject: [PATCH 140/149] crypto: jh7110 - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. 
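In the old model each driver context had to embed a struct crypto_engine_ctx as its first member and assign the request handler per transform in ->init_tfm(); with crypto_engine_op the handler sits in the *_engine_alg wrapper next to the algorithm definition and is picked up when the algorithm is registered through the crypto_engine_register_*() helpers. A minimal sketch of the converted skcipher shape, using placeholder my_* names rather than symbols from this driver:

	#include <crypto/aes.h>
	#include <crypto/engine.h>
	#include <crypto/internal/skcipher.h>
	#include <linux/module.h>
	#include <linux/string.h>

	struct my_ctx {
		u32 key[AES_MAX_KEY_SIZE / sizeof(u32)];
		unsigned int keylen;
		/* no struct crypto_engine_ctx member any more */
	};

	static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
	{
		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

		if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		    keylen != AES_KEYSIZE_256)
			return -EINVAL;

		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
		return 0;
	}

	/* Stub; a real driver queues the request on its crypto engine. */
	static int my_ecb_crypt(struct skcipher_request *req)
	{
		return -EOPNOTSUPP;
	}

	/* Formerly assigned to ctx->enginectx.op.do_one_request in ->init_tfm(). */
	static int my_do_one_request(struct crypto_engine *engine, void *areq)
	{
		struct skcipher_request *req =
			container_of(areq, struct skcipher_request, base);

		/* ... program the hardware here ... */
		crypto_finalize_skcipher_request(engine, req, 0);
		return 0;
	}

	static struct skcipher_engine_alg my_ecb_aes = {
		.base = {
			.base.cra_name		= "ecb(aes)",
			.base.cra_driver_name	= "my-ecb-aes",
			.base.cra_priority	= 200,
			.base.cra_flags		= CRYPTO_ALG_ASYNC,
			.base.cra_blocksize	= AES_BLOCK_SIZE,
			.base.cra_ctxsize	= sizeof(struct my_ctx),
			.base.cra_module	= THIS_MODULE,
			.min_keysize		= AES_MIN_KEY_SIZE,
			.max_keysize		= AES_MAX_KEY_SIZE,
			.setkey			= my_setkey,
			.encrypt		= my_ecb_crypt,
			.decrypt		= my_ecb_crypt,
		},
		.op = {
			.do_one_request	= my_do_one_request,
		},
	};

Probe code then registers with crypto_engine_register_skcipher(&my_ecb_aes) (or the plural crypto_engine_register_skciphers() for an array) and remove undoes it with the matching crypto_engine_unregister_*() call; the aead and ahash conversions below follow the same pattern.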
Signed-off-by: Herbert Xu --- drivers/crypto/starfive/jh7110-aes.c | 162 ++++++++------- drivers/crypto/starfive/jh7110-cryp.c | 9 +- drivers/crypto/starfive/jh7110-cryp.h | 2 - drivers/crypto/starfive/jh7110-hash.c | 282 ++++++++++++++------------ 4 files changed, 251 insertions(+), 204 deletions(-) diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c index 777656cbb7ce..9378e6682f0e 100644 --- a/drivers/crypto/starfive/jh7110-aes.c +++ b/drivers/crypto/starfive/jh7110-aes.c @@ -5,12 +5,17 @@ * Copyright (c) 2022 StarFive Technology */ -#include +#include #include -#include #include #include +#include #include "jh7110-cryp.h" +#include +#include +#include +#include +#include #define STARFIVE_AES_REGS_OFFSET 0x100 #define STARFIVE_AES_AESDIO0R (STARFIVE_AES_REGS_OFFSET + 0x0) @@ -554,8 +559,6 @@ static int starfive_aes_init_tfm(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) + sizeof(struct skcipher_request)); - ctx->enginectx.op.do_one_request = starfive_aes_do_one_req; - return 0; } @@ -638,8 +641,6 @@ static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm) crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) + sizeof(struct aead_request)); - ctx->enginectx.op.do_one_request = starfive_aes_aead_do_one_req; - return 0; } @@ -844,15 +845,15 @@ static int starfive_aes_ccm_decrypt(struct aead_request *req) return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM); } -static struct skcipher_alg skcipher_algs[] = { +static struct skcipher_engine_alg skcipher_algs[] = { { - .init = starfive_aes_init_tfm, - .setkey = starfive_aes_setkey, - .encrypt = starfive_aes_ecb_encrypt, - .decrypt = starfive_aes_ecb_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .base = { + .base.init = starfive_aes_init_tfm, + .base.setkey = starfive_aes_setkey, + .base.encrypt = starfive_aes_ecb_encrypt, + .base.decrypt = starfive_aes_ecb_decrypt, + .base.min_keysize = AES_MIN_KEY_SIZE, + .base.max_keysize = AES_MAX_KEY_SIZE, + .base.base = { .cra_name = "ecb(aes)", .cra_driver_name = "starfive-ecb-aes", .cra_priority = 200, @@ -862,15 +863,18 @@ static struct skcipher_alg skcipher_algs[] = { .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = starfive_aes_do_one_req, + }, }, { - .init = starfive_aes_init_tfm, - .setkey = starfive_aes_setkey, - .encrypt = starfive_aes_cbc_encrypt, - .decrypt = starfive_aes_cbc_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .base = { + .base.init = starfive_aes_init_tfm, + .base.setkey = starfive_aes_setkey, + .base.encrypt = starfive_aes_cbc_encrypt, + .base.decrypt = starfive_aes_cbc_decrypt, + .base.min_keysize = AES_MIN_KEY_SIZE, + .base.max_keysize = AES_MAX_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.base = { .cra_name = "cbc(aes)", .cra_driver_name = "starfive-cbc-aes", .cra_priority = 200, @@ -880,15 +884,18 @@ static struct skcipher_alg skcipher_algs[] = { .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = starfive_aes_do_one_req, + }, }, { - .init = starfive_aes_init_tfm, - .setkey = starfive_aes_setkey, - .encrypt = starfive_aes_ctr_encrypt, - .decrypt = starfive_aes_ctr_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .base = { + .base.init = starfive_aes_init_tfm, + .base.setkey = starfive_aes_setkey, + .base.encrypt = starfive_aes_ctr_encrypt, + 
.base.decrypt = starfive_aes_ctr_decrypt, + .base.min_keysize = AES_MIN_KEY_SIZE, + .base.max_keysize = AES_MAX_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.base = { .cra_name = "ctr(aes)", .cra_driver_name = "starfive-ctr-aes", .cra_priority = 200, @@ -898,15 +905,18 @@ static struct skcipher_alg skcipher_algs[] = { .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = starfive_aes_do_one_req, + }, }, { - .init = starfive_aes_init_tfm, - .setkey = starfive_aes_setkey, - .encrypt = starfive_aes_cfb_encrypt, - .decrypt = starfive_aes_cfb_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .base = { + .base.init = starfive_aes_init_tfm, + .base.setkey = starfive_aes_setkey, + .base.encrypt = starfive_aes_cfb_encrypt, + .base.decrypt = starfive_aes_cfb_decrypt, + .base.min_keysize = AES_MIN_KEY_SIZE, + .base.max_keysize = AES_MAX_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.base = { .cra_name = "cfb(aes)", .cra_driver_name = "starfive-cfb-aes", .cra_priority = 200, @@ -916,15 +926,18 @@ static struct skcipher_alg skcipher_algs[] = { .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = starfive_aes_do_one_req, + }, }, { - .init = starfive_aes_init_tfm, - .setkey = starfive_aes_setkey, - .encrypt = starfive_aes_ofb_encrypt, - .decrypt = starfive_aes_ofb_decrypt, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .base = { + .base.init = starfive_aes_init_tfm, + .base.setkey = starfive_aes_setkey, + .base.encrypt = starfive_aes_ofb_encrypt, + .base.decrypt = starfive_aes_ofb_decrypt, + .base.min_keysize = AES_MIN_KEY_SIZE, + .base.max_keysize = AES_MAX_KEY_SIZE, + .base.ivsize = AES_BLOCK_SIZE, + .base.base = { .cra_name = "ofb(aes)", .cra_driver_name = "starfive-ofb-aes", .cra_priority = 200, @@ -934,20 +947,23 @@ static struct skcipher_alg skcipher_algs[] = { .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = starfive_aes_do_one_req, + }, }, }; -static struct aead_alg aead_algs[] = { +static struct aead_engine_alg aead_algs[] = { { - .setkey = starfive_aes_aead_setkey, - .setauthsize = starfive_aes_gcm_setauthsize, - .encrypt = starfive_aes_gcm_encrypt, - .decrypt = starfive_aes_gcm_decrypt, - .init = starfive_aes_aead_init_tfm, - .exit = starfive_aes_aead_exit_tfm, - .ivsize = GCM_AES_IV_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .base = { + .base.setkey = starfive_aes_aead_setkey, + .base.setauthsize = starfive_aes_gcm_setauthsize, + .base.encrypt = starfive_aes_gcm_encrypt, + .base.decrypt = starfive_aes_gcm_decrypt, + .base.init = starfive_aes_aead_init_tfm, + .base.exit = starfive_aes_aead_exit_tfm, + .base.ivsize = GCM_AES_IV_SIZE, + .base.maxauthsize = AES_BLOCK_SIZE, + .base.base = { .cra_name = "gcm(aes)", .cra_driver_name = "starfive-gcm-aes", .cra_priority = 200, @@ -957,16 +973,19 @@ static struct aead_alg aead_algs[] = { .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = starfive_aes_aead_do_one_req, + }, }, { - .setkey = starfive_aes_aead_setkey, - .setauthsize = starfive_aes_ccm_setauthsize, - .encrypt = starfive_aes_ccm_encrypt, - .decrypt = starfive_aes_ccm_decrypt, - .init = starfive_aes_aead_init_tfm, - .exit = starfive_aes_aead_exit_tfm, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .base = { + .base.setkey = starfive_aes_aead_setkey, + .base.setauthsize = starfive_aes_ccm_setauthsize, + .base.encrypt = 
starfive_aes_ccm_encrypt, + .base.decrypt = starfive_aes_ccm_decrypt, + .base.init = starfive_aes_aead_init_tfm, + .base.exit = starfive_aes_aead_exit_tfm, + .base.ivsize = AES_BLOCK_SIZE, + .base.maxauthsize = AES_BLOCK_SIZE, + .base.base = { .cra_name = "ccm(aes)", .cra_driver_name = "starfive-ccm-aes", .cra_priority = 200, @@ -977,6 +996,9 @@ static struct aead_alg aead_algs[] = { .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = starfive_aes_aead_do_one_req, + }, }, }; @@ -984,19 +1006,19 @@ int starfive_aes_register_algs(void) { int ret; - ret = crypto_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); + ret = crypto_engine_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); if (ret) return ret; - ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); if (ret) - crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); + crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); return ret; } void starfive_aes_unregister_algs(void) { - crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); - crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); + crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); } diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c index ab37010ceb88..890ad5259329 100644 --- a/drivers/crypto/starfive/jh7110-cryp.c +++ b/drivers/crypto/starfive/jh7110-cryp.c @@ -7,17 +7,20 @@ * */ +#include +#include "jh7110-cryp.h" #include -#include +#include +#include #include #include +#include #include #include #include #include #include - -#include "jh7110-cryp.h" +#include #define DRIVER_NAME "jh7110-crypto" diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h index 345a8d878761..fe011d50473d 100644 --- a/drivers/crypto/starfive/jh7110-cryp.h +++ b/drivers/crypto/starfive/jh7110-cryp.h @@ -3,7 +3,6 @@ #define __STARFIVE_STR_H__ #include -#include #include #include #include @@ -151,7 +150,6 @@ union starfive_alg_cr { }; struct starfive_cryp_ctx { - struct crypto_engine_ctx enginectx; struct starfive_cryp_dev *cryp; struct starfive_cryp_request_ctx *rctx; diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c index 7fe89cd13336..739e944229af 100644 --- a/drivers/crypto/starfive/jh7110-hash.c +++ b/drivers/crypto/starfive/jh7110-hash.c @@ -6,11 +6,14 @@ * */ +#include +#include +#include +#include "jh7110-cryp.h" +#include #include -#include #include #include -#include #include #include #include @@ -18,13 +21,6 @@ #include #include #include -#include - -#include -#include -#include - -#include "jh7110-cryp.h" #define STARFIVE_HASH_REGS_OFFSET 0x300 #define STARFIVE_HASH_SHACSR (STARFIVE_HASH_REGS_OFFSET + 0x0) @@ -433,8 +429,6 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash, ctx->keylen = 0; ctx->hash_mode = mode; - ctx->enginectx.op.do_one_request = starfive_hash_one_request; - return 0; } @@ -612,18 +606,18 @@ static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash) STARFIVE_HASH_SM3); } -static struct ahash_alg algs_sha2_sm3[] = { +static struct ahash_engine_alg algs_sha2_sm3[] = { { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = 
starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_sha224_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_sha224_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { @@ -638,19 +632,22 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_hmac_sha224_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .setkey = starfive_hash_setkey, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_hmac_sha224_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.setkey = starfive_hash_setkey, + .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { @@ -665,18 +662,21 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_sha256_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_sha256_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { @@ -691,19 +691,22 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_hmac_sha256_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .setkey = starfive_hash_setkey, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_hmac_sha256_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.setkey = 
starfive_hash_setkey, + .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { @@ -718,18 +721,21 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_sha384_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_sha384_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { @@ -744,19 +750,22 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_hmac_sha384_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .setkey = starfive_hash_setkey, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_hmac_sha384_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.setkey = starfive_hash_setkey, + .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { @@ -771,18 +780,21 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_sha512_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_sha512_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { @@ -797,19 +809,22 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = 
starfive_hmac_sha512_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .setkey = starfive_hash_setkey, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_hmac_sha512_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.setkey = starfive_hash_setkey, + .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { @@ -824,18 +839,21 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_sm3_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_sm3_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.halg = { .digestsize = SM3_DIGEST_SIZE, .statesize = sizeof(struct sm3_state), .base = { @@ -850,19 +868,22 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, { - .init = starfive_hash_init, - .update = starfive_hash_update, - .final = starfive_hash_final, - .finup = starfive_hash_finup, - .digest = starfive_hash_digest, - .export = starfive_hash_export, - .import = starfive_hash_import, - .init_tfm = starfive_hmac_sm3_init_tfm, - .exit_tfm = starfive_hash_exit_tfm, - .setkey = starfive_hash_setkey, - .halg = { + .base.init = starfive_hash_init, + .base.update = starfive_hash_update, + .base.final = starfive_hash_final, + .base.finup = starfive_hash_finup, + .base.digest = starfive_hash_digest, + .base.export = starfive_hash_export, + .base.import = starfive_hash_import, + .base.init_tfm = starfive_hmac_sm3_init_tfm, + .base.exit_tfm = starfive_hash_exit_tfm, + .base.setkey = starfive_hash_setkey, + .base.halg = { .digestsize = SM3_DIGEST_SIZE, .statesize = sizeof(struct sm3_state), .base = { @@ -877,16 +898,19 @@ static struct ahash_alg algs_sha2_sm3[] = { .cra_alignmask = 3, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = starfive_hash_one_request, + }, }, }; int starfive_hash_register_algs(void) { - return crypto_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3)); + return crypto_engine_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3)); } void starfive_hash_unregister_algs(void) { - crypto_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3)); + crypto_engine_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3)); } From d5e6b48f94d66ca05ea64341b7ffbb7f981cca64 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:14 +0800 Subject: [PATCH 141/149] crypto: stm32 - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. 
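The hash side has the same shape: each ahash_alg moves under the .base member of an ahash_engine_alg, the crypto_engine_ctx member disappears from the tfm context, and registration switches to the crypto_engine_register_ahashes()/crypto_engine_unregister_ahashes() pair. A minimal sketch, again with placeholder my_* names rather than symbols from this driver:

	#include <crypto/engine.h>
	#include <crypto/internal/hash.h>
	#include <crypto/sha2.h>
	#include <linux/module.h>

	struct my_hash_ctx {
		unsigned long flags;	/* driver state only, no enginectx */
	};

	/* Stubs; a real driver queues these requests on its crypto engine. */
	static int my_hash_init(struct ahash_request *req)   { return -EOPNOTSUPP; }
	static int my_hash_update(struct ahash_request *req) { return -EOPNOTSUPP; }
	static int my_hash_final(struct ahash_request *req)  { return -EOPNOTSUPP; }
	static int my_hash_digest(struct ahash_request *req) { return -EOPNOTSUPP; }

	/* Formerly assigned to ctx->enginectx.op.do_one_request at init time. */
	static int my_hash_one_request(struct crypto_engine *engine, void *areq)
	{
		struct ahash_request *req =
			container_of(areq, struct ahash_request, base);

		/* ... run the hash engine, copy out the digest ... */
		crypto_finalize_hash_request(engine, req, 0);
		return 0;
	}

	static struct ahash_engine_alg my_sha256 = {
		.base = {
			.init	= my_hash_init,
			.update	= my_hash_update,
			.final	= my_hash_final,
			.digest	= my_hash_digest,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize  = sizeof(struct sha256_state),
				.base = {
					.cra_name	 = "sha256",
					.cra_driver_name = "my-sha256",
					.cra_priority	 = 200,
					.cra_flags	 = CRYPTO_ALG_ASYNC,
					.cra_blocksize	 = SHA256_BLOCK_SIZE,
					.cra_ctxsize	 = sizeof(struct my_hash_ctx),
					.cra_module	 = THIS_MODULE,
				},
			},
		},
		.op = {
			.do_one_request = my_hash_one_request,
		},
	};

With the array-based helpers a driver can register a whole table at once, e.g. crypto_engine_register_ahashes(algs, ARRAY_SIZE(algs)).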
Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-cryp.c | 317 +++++++++++--------- drivers/crypto/stm32/stm32-hash.c | 483 +++++++++++++++++------------- 2 files changed, 445 insertions(+), 355 deletions(-) diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 07e32b8dbe29..c67239686b1e 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -5,22 +5,24 @@ * Ux500 support taken from snippets in the old Ux500 cryp driver */ +#include +#include +#include +#include +#include +#include #include #include -#include +#include #include +#include +#include #include #include #include #include #include - -#include -#include -#include -#include -#include -#include +#include #define DRIVER_NAME "stm32-cryp" @@ -156,7 +158,6 @@ struct stm32_cryp_caps { }; struct stm32_cryp_ctx { - struct crypto_engine_ctx enginectx; struct stm32_cryp *cryp; int keylen; __be32 key[AES_KEYSIZE_256 / sizeof(u32)]; @@ -828,11 +829,8 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq); static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm) { - struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx)); - ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req; return 0; } @@ -840,12 +838,8 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq); static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm) { - struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm); - tfm->reqsize = sizeof(struct stm32_cryp_reqctx); - ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req; - return 0; } @@ -1686,143 +1680,178 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg) return IRQ_WAKE_THREAD; } -static struct skcipher_alg crypto_algs[] = { +static struct skcipher_engine_alg crypto_algs[] = { { - .base.cra_name = "ecb(aes)", - .base.cra_driver_name = "stm32-ecb-aes", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "stm32-ecb-aes", + .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, - .init = stm32_cryp_init_tfm, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = stm32_cryp_aes_setkey, - .encrypt = stm32_cryp_aes_ecb_encrypt, - .decrypt = stm32_cryp_aes_ecb_decrypt, + .init = stm32_cryp_init_tfm, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = stm32_cryp_aes_setkey, + .encrypt = stm32_cryp_aes_ecb_encrypt, + .decrypt = stm32_cryp_aes_ecb_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .base.cra_name = "cbc(aes)", - .base.cra_driver_name = "stm32-cbc-aes", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "stm32-cbc-aes", + .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask 
= 0, + .base.cra_module = THIS_MODULE, - .init = stm32_cryp_init_tfm, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = stm32_cryp_aes_setkey, - .encrypt = stm32_cryp_aes_cbc_encrypt, - .decrypt = stm32_cryp_aes_cbc_decrypt, + .init = stm32_cryp_init_tfm, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = stm32_cryp_aes_setkey, + .encrypt = stm32_cryp_aes_cbc_encrypt, + .decrypt = stm32_cryp_aes_cbc_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .base.cra_name = "ctr(aes)", - .base.cra_driver_name = "stm32-ctr-aes", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "ctr(aes)", + .base.cra_driver_name = "stm32-ctr-aes", + .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, - .init = stm32_cryp_init_tfm, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = stm32_cryp_aes_setkey, - .encrypt = stm32_cryp_aes_ctr_encrypt, - .decrypt = stm32_cryp_aes_ctr_decrypt, + .init = stm32_cryp_init_tfm, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = stm32_cryp_aes_setkey, + .encrypt = stm32_cryp_aes_ctr_encrypt, + .decrypt = stm32_cryp_aes_ctr_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .base.cra_name = "ecb(des)", - .base.cra_driver_name = "stm32-ecb-des", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "ecb(des)", + .base.cra_driver_name = "stm32-ecb-des", + .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, - .init = stm32_cryp_init_tfm, - .min_keysize = DES_BLOCK_SIZE, - .max_keysize = DES_BLOCK_SIZE, - .setkey = stm32_cryp_des_setkey, - .encrypt = stm32_cryp_des_ecb_encrypt, - .decrypt = stm32_cryp_des_ecb_decrypt, + .init = stm32_cryp_init_tfm, + .min_keysize = DES_BLOCK_SIZE, + .max_keysize = DES_BLOCK_SIZE, + .setkey = stm32_cryp_des_setkey, + .encrypt = stm32_cryp_des_ecb_encrypt, + .decrypt = stm32_cryp_des_ecb_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .base.cra_name = "cbc(des)", - .base.cra_driver_name = "stm32-cbc-des", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "cbc(des)", + .base.cra_driver_name = "stm32-cbc-des", + .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, - .init = stm32_cryp_init_tfm, - .min_keysize = DES_BLOCK_SIZE, - .max_keysize = DES_BLOCK_SIZE, - .ivsize = 
DES_BLOCK_SIZE, - .setkey = stm32_cryp_des_setkey, - .encrypt = stm32_cryp_des_cbc_encrypt, - .decrypt = stm32_cryp_des_cbc_decrypt, + .init = stm32_cryp_init_tfm, + .min_keysize = DES_BLOCK_SIZE, + .max_keysize = DES_BLOCK_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = stm32_cryp_des_setkey, + .encrypt = stm32_cryp_des_cbc_encrypt, + .decrypt = stm32_cryp_des_cbc_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .base.cra_name = "ecb(des3_ede)", - .base.cra_driver_name = "stm32-ecb-des3", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "ecb(des3_ede)", + .base.cra_driver_name = "stm32-ecb-des3", + .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, - .init = stm32_cryp_init_tfm, - .min_keysize = 3 * DES_BLOCK_SIZE, - .max_keysize = 3 * DES_BLOCK_SIZE, - .setkey = stm32_cryp_tdes_setkey, - .encrypt = stm32_cryp_tdes_ecb_encrypt, - .decrypt = stm32_cryp_tdes_ecb_decrypt, + .init = stm32_cryp_init_tfm, + .min_keysize = 3 * DES_BLOCK_SIZE, + .max_keysize = 3 * DES_BLOCK_SIZE, + .setkey = stm32_cryp_tdes_setkey, + .encrypt = stm32_cryp_tdes_ecb_encrypt, + .decrypt = stm32_cryp_tdes_ecb_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, { - .base.cra_name = "cbc(des3_ede)", - .base.cra_driver_name = "stm32-cbc-des3", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = DES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0, - .base.cra_module = THIS_MODULE, + .base = { + .base.cra_name = "cbc(des3_ede)", + .base.cra_driver_name = "stm32-cbc-des3", + .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_blocksize = DES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), + .base.cra_alignmask = 0, + .base.cra_module = THIS_MODULE, - .init = stm32_cryp_init_tfm, - .min_keysize = 3 * DES_BLOCK_SIZE, - .max_keysize = 3 * DES_BLOCK_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = stm32_cryp_tdes_setkey, - .encrypt = stm32_cryp_tdes_cbc_encrypt, - .decrypt = stm32_cryp_tdes_cbc_decrypt, + .init = stm32_cryp_init_tfm, + .min_keysize = 3 * DES_BLOCK_SIZE, + .max_keysize = 3 * DES_BLOCK_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = stm32_cryp_tdes_setkey, + .encrypt = stm32_cryp_tdes_cbc_encrypt, + .decrypt = stm32_cryp_tdes_cbc_decrypt, + }, + .op = { + .do_one_request = stm32_cryp_cipher_one_req, + }, }, }; -static struct aead_alg aead_algs[] = { +static struct aead_engine_alg aead_algs[] = { { - .setkey = stm32_cryp_aes_aead_setkey, - .setauthsize = stm32_cryp_aes_gcm_setauthsize, - .encrypt = stm32_cryp_aes_gcm_encrypt, - .decrypt = stm32_cryp_aes_gcm_decrypt, - .init = stm32_cryp_aes_aead_init, - .ivsize = 12, - .maxauthsize = AES_BLOCK_SIZE, + .base.setkey = stm32_cryp_aes_aead_setkey, + .base.setauthsize = stm32_cryp_aes_gcm_setauthsize, + .base.encrypt = stm32_cryp_aes_gcm_encrypt, + .base.decrypt = stm32_cryp_aes_gcm_decrypt, + .base.init = stm32_cryp_aes_aead_init, + .base.ivsize = 12, + .base.maxauthsize = AES_BLOCK_SIZE, - .base = { + .base.base = { .cra_name = "gcm(aes)", .cra_driver_name = "stm32-gcm-aes", .cra_priority = 200, @@ -1832,17 +1861,20 @@ static 
struct aead_alg aead_algs[] = { .cra_alignmask = 0, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = stm32_cryp_aead_one_req, + }, }, { - .setkey = stm32_cryp_aes_aead_setkey, - .setauthsize = stm32_cryp_aes_ccm_setauthsize, - .encrypt = stm32_cryp_aes_ccm_encrypt, - .decrypt = stm32_cryp_aes_ccm_decrypt, - .init = stm32_cryp_aes_aead_init, - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, + .base.setkey = stm32_cryp_aes_aead_setkey, + .base.setauthsize = stm32_cryp_aes_ccm_setauthsize, + .base.encrypt = stm32_cryp_aes_ccm_encrypt, + .base.decrypt = stm32_cryp_aes_ccm_decrypt, + .base.init = stm32_cryp_aes_aead_init, + .base.ivsize = AES_BLOCK_SIZE, + .base.maxauthsize = AES_BLOCK_SIZE, - .base = { + .base.base = { .cra_name = "ccm(aes)", .cra_driver_name = "stm32-ccm-aes", .cra_priority = 200, @@ -1852,6 +1884,9 @@ static struct aead_alg aead_algs[] = { .cra_alignmask = 0, .cra_module = THIS_MODULE, }, + .op = { + .do_one_request = stm32_cryp_aead_one_req, + }, }, }; @@ -2013,14 +2048,14 @@ static int stm32_cryp_probe(struct platform_device *pdev) goto err_engine2; } - ret = crypto_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); + ret = crypto_engine_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); if (ret) { dev_err(dev, "Could not register algs\n"); goto err_algs; } if (cryp->caps->aeads_support) { - ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); if (ret) goto err_aead_algs; } @@ -2032,7 +2067,7 @@ static int stm32_cryp_probe(struct platform_device *pdev) return 0; err_aead_algs: - crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); + crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); err_algs: err_engine2: crypto_engine_exit(cryp->engine); @@ -2062,8 +2097,8 @@ static int stm32_cryp_remove(struct platform_device *pdev) return ret; if (cryp->caps->aeads_support) - crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); - crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); + crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); + crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); crypto_engine_exit(cryp->engine); diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 68c52eeaa6b1..53cf922d1a38 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -6,12 +6,18 @@ * Author(s): Lionel DEBIEVE for STMicroelectronics. 
*/ +#include +#include +#include +#include +#include +#include +#include #include #include #include #include #include -#include #include #include #include @@ -19,15 +25,7 @@ #include #include #include - -#include -#include -#include -#include -#include -#include -#include -#include +#include #define HASH_CR 0x00 #define HASH_DIN 0x04 @@ -133,7 +131,6 @@ enum ux500_hash_algo { #define HASH_AUTOSUSPEND_DELAY 50 struct stm32_hash_ctx { - struct crypto_engine_ctx enginectx; struct stm32_hash_dev *hdev; struct crypto_shash *xtfm; unsigned long flags; @@ -177,7 +174,7 @@ struct stm32_hash_request_ctx { }; struct stm32_hash_algs_info { - struct ahash_alg *algs_list; + struct ahash_engine_alg *algs_list; size_t size; }; @@ -1194,8 +1191,6 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags) if (algs_flags) ctx->flags |= algs_flags; - ctx->enginectx.op.do_one_request = stm32_hash_one_request; - return stm32_hash_init_fallback(tfm); } @@ -1268,16 +1263,16 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id) return IRQ_NONE; } -static struct ahash_alg algs_md5[] = { +static struct ahash_engine_alg algs_md5[] = { { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1293,18 +1288,21 @@ static struct ahash_alg algs_md5[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .setkey = stm32_hash_setkey, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.setkey = stm32_hash_setkey, + .base.halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1320,20 +1318,23 @@ static struct ahash_alg algs_md5[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, } }; -static struct ahash_alg algs_sha1[] = { +static struct ahash_engine_alg algs_sha1[] = { { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1349,18 +1350,21 @@ static struct ahash_alg algs_sha1[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } 
+ }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .setkey = stm32_hash_setkey, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.setkey = stm32_hash_setkey, + .base.halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1376,20 +1380,23 @@ static struct ahash_alg algs_sha1[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, }; -static struct ahash_alg algs_sha224[] = { +static struct ahash_engine_alg algs_sha224[] = { { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1405,18 +1412,21 @@ static struct ahash_alg algs_sha224[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .setkey = stm32_hash_setkey, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.setkey = stm32_hash_setkey, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1432,20 +1442,23 @@ static struct ahash_alg algs_sha224[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, }; -static struct ahash_alg algs_sha256[] = { +static struct ahash_engine_alg algs_sha256[] = { { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1461,18 +1474,21 @@ static struct ahash_alg algs_sha256[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = 
stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .setkey = stm32_hash_setkey, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.setkey = stm32_hash_setkey, + .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1488,20 +1504,23 @@ static struct ahash_alg algs_sha256[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, }; -static struct ahash_alg algs_sha384_sha512[] = { +static struct ahash_engine_alg algs_sha384_sha512[] = { { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1517,18 +1536,21 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .setkey = stm32_hash_setkey, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.setkey = stm32_hash_setkey, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1544,17 +1566,20 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1570,18 +1595,21 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .setkey = stm32_hash_setkey, - .halg = { + .base.init = 
stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.setkey = stm32_hash_setkey, + .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1597,20 +1625,23 @@ static struct ahash_alg algs_sha384_sha512[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, }; -static struct ahash_alg algs_sha3[] = { +static struct ahash_engine_alg algs_sha3[] = { { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA3_224_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1626,18 +1657,21 @@ static struct ahash_alg algs_sha3[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .setkey = stm32_hash_setkey, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.setkey = stm32_hash_setkey, + .base.halg = { .digestsize = SHA3_224_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1653,17 +1687,20 @@ static struct ahash_alg algs_sha3[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, - { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA3_256_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1679,18 +1716,21 @@ static struct ahash_alg algs_sha3[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .setkey = stm32_hash_setkey, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = 
stm32_hash_import, + .base.setkey = stm32_hash_setkey, + .base.halg = { .digestsize = SHA3_256_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1706,17 +1746,20 @@ static struct ahash_alg algs_sha3[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA3_384_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1732,18 +1775,21 @@ static struct ahash_alg algs_sha3[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .setkey = stm32_hash_setkey, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.setkey = stm32_hash_setkey, + .base.halg = { .digestsize = SHA3_384_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1759,17 +1805,20 @@ static struct ahash_alg algs_sha3[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.halg = { .digestsize = SHA3_512_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1785,18 +1834,21 @@ static struct ahash_alg algs_sha3[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = stm32_hash_one_request, + }, }, { - .init = stm32_hash_init, - .update = stm32_hash_update, - .final = stm32_hash_final, - .finup = stm32_hash_finup, - .digest = stm32_hash_digest, - .export = stm32_hash_export, - .import = stm32_hash_import, - .setkey = stm32_hash_setkey, - .halg = { + .base.init = stm32_hash_init, + .base.update = stm32_hash_update, + .base.final = stm32_hash_final, + .base.finup = stm32_hash_finup, + .base.digest = stm32_hash_digest, + .base.export = stm32_hash_export, + .base.import = stm32_hash_import, + .base.setkey = stm32_hash_setkey, + .base.halg = { .digestsize = SHA3_512_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { @@ -1812,7 +1864,10 @@ static struct ahash_alg algs_sha3[] = { .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } - } + }, + .op = { + .do_one_request = 
stm32_hash_one_request, + }, } }; @@ -1823,7 +1878,7 @@ static int stm32_hash_register_algs(struct stm32_hash_dev *hdev) for (i = 0; i < hdev->pdata->algs_info_size; i++) { for (j = 0; j < hdev->pdata->algs_info[i].size; j++) { - err = crypto_register_ahash( + err = crypto_engine_register_ahash( &hdev->pdata->algs_info[i].algs_list[j]); if (err) goto err_algs; @@ -1835,7 +1890,7 @@ err_algs: dev_err(hdev->dev, "Algo %d : %d failed\n", i, j); for (; i--; ) { for (; j--;) - crypto_unregister_ahash( + crypto_engine_unregister_ahash( &hdev->pdata->algs_info[i].algs_list[j]); } @@ -1848,7 +1903,7 @@ static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev) for (i = 0; i < hdev->pdata->algs_info_size; i++) { for (j = 0; j < hdev->pdata->algs_info[i].size; j++) - crypto_unregister_ahash( + crypto_engine_unregister_ahash( &hdev->pdata->algs_info[i].algs_list[j]); } From 7a2673d70ca69bb890dc0fec590297f5aa7da739 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:16 +0800 Subject: [PATCH 142/149] crypto: virtio - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. Signed-off-by: Herbert Xu --- .../virtio/virtio_crypto_akcipher_algs.c | 33 +++++++++++-------- .../virtio/virtio_crypto_skcipher_algs.c | 23 +++++++------ 2 files changed, 30 insertions(+), 26 deletions(-) diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index ff3369ca319f..2621ff8a9376 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -7,15 +7,16 @@ * Copyright 2022 Bytedance CO., LTD. */ -#include -#include -#include +#include #include #include -#include #include -#include - +#include +#include +#include +#include +#include +#include #include #include "virtio_crypto_common.h" @@ -24,7 +25,6 @@ struct virtio_crypto_rsa_ctx { }; struct virtio_crypto_akcipher_ctx { - struct crypto_engine_ctx enginectx; struct virtio_crypto *vcrypto; struct crypto_akcipher *tfm; bool session_valid; @@ -47,7 +47,7 @@ struct virtio_crypto_akcipher_algo { uint32_t algonum; uint32_t service; unsigned int active_devs; - struct akcipher_alg algo; + struct akcipher_engine_alg algo; }; static DEFINE_MUTEX(algs_lock); @@ -475,7 +475,6 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm) struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm); ctx->tfm = tfm; - ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req; akcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_akcipher_request)); @@ -498,7 +497,7 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = { { .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA, .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER, - .algo = { + .algo.base = { .encrypt = virtio_crypto_rsa_encrypt, .decrypt = virtio_crypto_rsa_decrypt, .set_pub_key = virtio_crypto_rsa_raw_set_pub_key, @@ -514,11 +513,14 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = { .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx), }, }, + .algo.op = { + .do_one_request = virtio_crypto_rsa_do_req, + }, }, { .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA, .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER, - .algo = { + .algo.base = { .encrypt = virtio_crypto_rsa_encrypt, .decrypt = virtio_crypto_rsa_decrypt, .sign = virtio_crypto_rsa_sign, @@ -536,6 +538,9 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = { .cra_ctxsize = sizeof(struct 
virtio_crypto_akcipher_ctx), }, }, + .algo.op = { + .do_one_request = virtio_crypto_rsa_do_req, + }, }, }; @@ -554,14 +559,14 @@ int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto) continue; if (virtio_crypto_akcipher_algs[i].active_devs == 0) { - ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo); + ret = crypto_engine_register_akcipher(&virtio_crypto_akcipher_algs[i].algo); if (ret) goto unlock; } virtio_crypto_akcipher_algs[i].active_devs++; dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n", - virtio_crypto_akcipher_algs[i].algo.base.cra_name); + virtio_crypto_akcipher_algs[i].algo.base.base.cra_name); } unlock: @@ -584,7 +589,7 @@ void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto) continue; if (virtio_crypto_akcipher_algs[i].active_devs == 1) - crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo); + crypto_engine_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo); virtio_crypto_akcipher_algs[i].active_devs--; } diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c index 71b8751ab5ab..23c41d87d835 100644 --- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c @@ -6,19 +6,16 @@ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD. */ -#include -#include +#include #include -#include #include -#include - +#include +#include #include #include "virtio_crypto_common.h" struct virtio_crypto_skcipher_ctx { - struct crypto_engine_ctx enginectx; struct virtio_crypto *vcrypto; struct crypto_skcipher *tfm; @@ -42,7 +39,7 @@ struct virtio_crypto_algo { uint32_t algonum; uint32_t service; unsigned int active_devs; - struct skcipher_alg algo; + struct skcipher_engine_alg algo; }; /* @@ -523,7 +520,6 @@ static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request)); ctx->tfm = tfm; - ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req; return 0; } @@ -578,7 +574,7 @@ static void virtio_crypto_skcipher_finalize_req( static struct virtio_crypto_algo virtio_crypto_algs[] = { { .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC, .service = VIRTIO_CRYPTO_SERVICE_CIPHER, - .algo = { + .algo.base = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "virtio_crypto_aes_cbc", .base.cra_priority = 150, @@ -596,6 +592,9 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { { .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, + .algo.op = { + .do_one_request = virtio_crypto_skcipher_crypt_req, + }, } }; int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto) @@ -614,14 +613,14 @@ int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto) continue; if (virtio_crypto_algs[i].active_devs == 0) { - ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo); + ret = crypto_engine_register_skcipher(&virtio_crypto_algs[i].algo); if (ret) goto unlock; } virtio_crypto_algs[i].active_devs++; dev_info(&vcrypto->vdev->dev, "Registered algo %s\n", - virtio_crypto_algs[i].algo.base.cra_name); + virtio_crypto_algs[i].algo.base.base.cra_name); } unlock: @@ -645,7 +644,7 @@ void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto) continue; if (virtio_crypto_algs[i].active_devs == 1) - crypto_unregister_skcipher(&virtio_crypto_algs[i].algo); + crypto_engine_unregister_skcipher(&virtio_crypto_algs[i].algo); virtio_crypto_algs[i].active_devs--; } From 
28f860d377da437695a6db2330dad3f8ca672f8d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:18 +0800 Subject: [PATCH 143/149] crypto: zynqmp - Use new crypto_engine_op interface Use the new crypto_engine_op interface where the callback is stored in the algorithm object. Signed-off-by: Herbert Xu --- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 35 +++++++++++++------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c index ae7dd82abea1..42fe2b7afef2 100644 --- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c +++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c @@ -9,13 +9,14 @@ #include #include #include - #include +#include +#include +#include #include #include #include - -#include +#include #define ZYNQMP_DMA_BIT_MASK 32U @@ -43,7 +44,7 @@ enum zynqmp_aead_keysrc { struct zynqmp_aead_drv_ctx { union { - struct aead_alg aead; + struct aead_engine_alg aead; } alg; struct device *dev; struct crypto_engine *engine; @@ -60,7 +61,6 @@ struct zynqmp_aead_hw_req { }; struct zynqmp_aead_tfm_ctx { - struct crypto_engine_ctx engine_ctx; struct device *dev; u8 key[ZYNQMP_AES_KEY_SIZE]; u8 *iv; @@ -286,7 +286,7 @@ static int zynqmp_aes_aead_encrypt(struct aead_request *req) struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req); rq_ctx->op = ZYNQMP_AES_ENCRYPT; - drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead); + drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base); return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req); } @@ -299,7 +299,7 @@ static int zynqmp_aes_aead_decrypt(struct aead_request *req) struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req); rq_ctx->op = ZYNQMP_AES_DECRYPT; - drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead); + drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base); return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req); } @@ -312,18 +312,16 @@ static int zynqmp_aes_aead_init(struct crypto_aead *aead) struct zynqmp_aead_drv_ctx *drv_ctx; struct aead_alg *alg = crypto_aead_alg(aead); - drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead); + drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base); tfm_ctx->dev = drv_ctx->dev; - tfm_ctx->engine_ctx.op.do_one_request = zynqmp_handle_aes_req; - - tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.cra_name, + tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(tfm_ctx->fbk_cipher)) { pr_err("%s() Error: failed to allocate fallback for %s\n", - __func__, drv_ctx->alg.aead.base.cra_name); + __func__, drv_ctx->alg.aead.base.base.cra_name); return PTR_ERR(tfm_ctx->fbk_cipher); } @@ -348,7 +346,7 @@ static void zynqmp_aes_aead_exit(struct crypto_aead *aead) } static struct zynqmp_aead_drv_ctx aes_drv_ctx = { - .alg.aead = { + .alg.aead.base = { .setkey = zynqmp_aes_aead_setkey, .setauthsize = zynqmp_aes_aead_setauthsize, .encrypt = zynqmp_aes_aead_encrypt, @@ -370,7 +368,10 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = { .cra_ctxsize = sizeof(struct zynqmp_aead_tfm_ctx), .cra_module = THIS_MODULE, } - } + }, + .alg.aead.op = { + .do_one_request = zynqmp_handle_aes_req, + }, }; static int zynqmp_aes_aead_probe(struct platform_device *pdev) @@ -403,7 +404,7 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev) goto err_engine; } - err = crypto_register_aead(&aes_drv_ctx.alg.aead); + err = 
crypto_engine_register_aead(&aes_drv_ctx.alg.aead); if (err < 0) { dev_err(dev, "Failed to register AEAD alg.\n"); goto err_aead; @@ -411,7 +412,7 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev) return 0; err_aead: - crypto_unregister_aead(&aes_drv_ctx.alg.aead); + crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead); err_engine: if (aes_drv_ctx.engine) @@ -423,7 +424,7 @@ err_engine: static int zynqmp_aes_aead_remove(struct platform_device *pdev) { crypto_engine_exit(aes_drv_ctx.engine); - crypto_unregister_aead(&aes_drv_ctx.alg.aead); + crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead); return 0; } From 5ce0bc68e0eeab14fe4dd3be9975c5ced7b20617 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 13 Aug 2023 14:55:20 +0800 Subject: [PATCH 144/149] crypto: engine - Remove crypto_engine_ctx Remove the obsolete crypto_engine_ctx structure. Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 12 +++--------- include/crypto/engine.h | 4 ---- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index abfb1e6bfa48..108d9d55c509 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -79,7 +79,6 @@ static void crypto_pump_requests(struct crypto_engine *engine, unsigned long flags; bool was_busy = false; int ret; - struct crypto_engine_ctx *enginectx; spin_lock_irqsave(&engine->queue_lock, flags); @@ -154,14 +153,9 @@ start_request: struct crypto_engine_alg, base); op = &alg->op; } else { - enginectx = crypto_tfm_ctx(async_req->tfm); - op = &enginectx->op; - - if (!op->do_one_request) { - dev_err(engine->dev, "failed to do request\n"); - ret = -EINVAL; - goto req_err_1; - } + dev_err(engine->dev, "failed to do request\n"); + ret = -EINVAL; + goto req_err_1; } ret = op->do_one_request(engine, async_req); diff --git a/include/crypto/engine.h b/include/crypto/engine.h index cf57e732566b..2835069c5997 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -26,10 +26,6 @@ struct crypto_engine_op { void *areq); }; -struct crypto_engine_ctx { - struct crypto_engine_op op; -}; - struct aead_engine_alg { struct aead_alg base; struct crypto_engine_op op; From b0cc7491c98917f191f14efce7630b547f7ec419 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 14 Jul 2023 11:44:18 -0600 Subject: [PATCH 145/149] crypto: drivers - Explicitly include correct DT includes The DT of_device.h and of_platform.h date back to the separate of_platform_bus_type before it as merged into the regular platform bus. As part of that merge prepping Arm DT support 13 years ago, they "temporarily" include each other. They also include platform_device.h and of.h. As a result, there's a pretty much random mix of those include files used throughout the tree. In order to detangle these headers and replace the implicit includes with struct declarations, users need to explicitly include the correct includes. 
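[Editor's illustration, not part of any file touched by this series: a minimal sketch of the include pattern described above. The "demo" driver, its compatible string and probe stub are invented placeholders; the point is only that a DT-matched platform driver names each header it actually relies on (mod_devicetable.h for struct of_device_id, of.h for the OF helpers, platform_device.h for the platform bus types) instead of picking them up through the of_device.h/of_platform.h include chain, and that the of_match_ptr() wrapper can be dropped, as several of the drivers below do.]

	#include <linux/mod_devicetable.h>	/* struct of_device_id */
	#include <linux/module.h>
	#include <linux/of.h>			/* OF property helpers */
	#include <linux/platform_device.h>	/* platform bus types */

	static int demo_probe(struct platform_device *pdev)
	{
		/* Placeholder probe; real drivers do their setup here. */
		return 0;
	}

	static const struct of_device_id demo_dt_ids[] = {
		{ .compatible = "vendor,demo-crypto" },	/* made-up compatible */
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, demo_dt_ids);

	static struct platform_driver demo_driver = {
		.probe = demo_probe,
		.driver = {
			.name = "demo-crypto",
			/* no of_match_ptr() wrapper needed */
			.of_match_table = demo_dt_ids,
		},
	};
	module_platform_driver(demo_driver);
	MODULE_LICENSE("GPL");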
Signed-off-by: Rob Herring Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 1 - drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 1 - drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 1 - drivers/crypto/amlogic/amlogic-gxl-core.c | 1 - drivers/crypto/aspeed/aspeed-acry.c | 3 --- drivers/crypto/atmel-aes.c | 6 ++---- drivers/crypto/atmel-ecc.c | 2 +- drivers/crypto/atmel-sha.c | 6 ++---- drivers/crypto/atmel-tdes.c | 6 ++---- drivers/crypto/bcm/cipher.c | 3 +-- drivers/crypto/caam/ctrl.c | 1 + drivers/crypto/caam/jr.c | 1 + drivers/crypto/caam/qi.c | 1 + drivers/crypto/ccree/cc_driver.c | 1 - drivers/crypto/exynos-rng.c | 2 +- drivers/crypto/gemini/sl3516-ce-core.c | 1 - drivers/crypto/img-hash.c | 4 ++-- drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 3 ++- drivers/crypto/n2_core.c | 2 +- drivers/crypto/omap-aes.c | 1 - drivers/crypto/omap-des.c | 2 -- drivers/crypto/omap-sham.c | 1 - drivers/crypto/rockchip/rk3288_crypto.c | 1 - drivers/crypto/s5p-sss.c | 1 - drivers/crypto/sa2ul.c | 3 ++- drivers/crypto/sahara.c | 1 - drivers/crypto/starfive/jh7110-cryp.c | 2 +- drivers/crypto/starfive/jh7110-hash.c | 1 - drivers/crypto/stm32/stm32-cryp.c | 2 +- drivers/crypto/stm32/stm32-hash.c | 2 +- drivers/crypto/talitos.c | 4 ++-- drivers/crypto/xilinx/zynqmp-aes-gcm.c | 2 +- drivers/crypto/xilinx/zynqmp-sha.c | 1 - 33 files changed, 25 insertions(+), 45 deletions(-) diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c index 51a3a7b5b985..3bcfcfc37084 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index 1c2c32d0568e..d4ccd5254280 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 62c001c9efc2..4a9587285c04 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 9846d791c956..da6dfe0f9ac3 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include "amlogic-gxl.h" diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c index 2a970367e686..247c568aa8df 100644 --- a/drivers/crypto/aspeed/aspeed-acry.c +++ b/drivers/crypto/aspeed/aspeed-acry.c @@ -15,9 +15,6 @@ #include #include #include -#include -#include -#include #include #include #include diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 9692254faad9..55b5f577b01c 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -2533,13 +2533,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) } } -#if defined(CONFIG_OF) static const struct of_device_id atmel_aes_dt_ids[] = { { .compatible = 
"atmel,at91sam9g46-aes" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids); -#endif static int atmel_aes_probe(struct platform_device *pdev) { @@ -2678,7 +2676,7 @@ static struct platform_driver atmel_aes_driver = { .remove = atmel_aes_remove, .driver = { .name = "atmel_aes", - .of_match_table = of_match_ptr(atmel_aes_dt_ids), + .of_match_table = atmel_aes_dt_ids, }, }; diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 432beabd79e6..590ea984c622 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 99a9ff8e743f..3622120add62 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -2571,14 +2571,12 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd) } } -#if defined(CONFIG_OF) static const struct of_device_id atmel_sha_dt_ids[] = { { .compatible = "atmel,at91sam9g46-sha" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids); -#endif static int atmel_sha_probe(struct platform_device *pdev) { @@ -2708,7 +2706,7 @@ static struct platform_driver atmel_sha_driver = { .remove = atmel_sha_remove, .driver = { .name = "atmel_sha", - .of_match_table = of_match_ptr(atmel_sha_dt_ids), + .of_match_table = atmel_sha_dt_ids, }, }; diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index ba8981f326cf..099b32a10dd7 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -1139,13 +1139,11 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd) } } -#if defined(CONFIG_OF) static const struct of_device_id atmel_tdes_dt_ids[] = { { .compatible = "atmel,at91sam9g46-tdes" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids); -#endif static int atmel_tdes_probe(struct platform_device *pdev) { @@ -1274,7 +1272,7 @@ static struct platform_driver atmel_tdes_driver = { .remove = atmel_tdes_remove, .driver = { .name = "atmel_tdes", - .of_match_table = of_match_ptr(atmel_tdes_dt_ids), + .of_match_table = atmel_tdes_dt_ids, }, }; diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 4c46357e2570..689be70d69c1 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -15,8 +15,7 @@ #include #include #include -#include -#include +#include #include #include diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 2a228a36fa15..f63c3ed26c37 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 07ec2f27cc68..b1f1b393b98e 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -9,6 +9,7 @@ #include #include +#include #include "compat.h" #include "ctrl.h" diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 2ad2c1035856..46a083849a8e 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index c57f929805d5..0f0694037dd7 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ 
b/drivers/crypto/ccree/cc_driver.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include "cc_driver.h" diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c index b1df66be9adc..5d60a4bcb511 100644 --- a/drivers/crypto/exynos-rng.c +++ b/drivers/crypto/exynos-rng.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c index 0fd47ab9df5c..0f43c6e39bb9 100644 --- a/drivers/crypto/gemini/sl3516-ce-core.c +++ b/drivers/crypto/gemini/sl3516-ce-core.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 359aa2b41016..45063693859c 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include @@ -1105,7 +1105,7 @@ static struct platform_driver img_hash_driver = { .driver = { .name = "img-hash-accelerator", .pm = &img_hash_pm_ops, - .of_match_table = of_match_ptr(img_hash_match), + .of_match_table = img_hash_match, } }; module_platform_driver(img_hash_driver); diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c index 57a20281ead8..daba8ca05dbe 100644 --- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c @@ -16,8 +16,9 @@ #include #include #include +#include #include -#include +#include #include #include "ocs-hcu.h" diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 4f6ca229ee5e..d5a32d71a3e9 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index ea1331218105..ed83023dd77a 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index ae9e9e4eb94c..089dd45eaedd 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -29,8 +29,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4b79d54fd671..a6b4a0b3ace3 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 937826c50470..77d5705a5d96 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 1c4d5fb05d69..fe8cf9ba8005 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index df5f9d675c57..6238d34f8db2 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -15,7 +15,8 @@ #include #include #include -#include +#include +#include #include #include diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 4c799df3e883..62d93526920f 100644 --- 
a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c index 890ad5259329..08e974e0dd12 100644 --- a/drivers/crypto/starfive/jh7110-cryp.c +++ b/drivers/crypto/starfive/jh7110-cryp.c @@ -15,8 +15,8 @@ #include #include #include +#include #include -#include #include #include #include diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c index 739e944229af..cc7650198d70 100644 --- a/drivers/crypto/starfive/jh7110-hash.c +++ b/drivers/crypto/starfive/jh7110-hash.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index c67239686b1e..f095f0065428 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 53cf922d1a38..2b2382d4332c 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index bb27f011cf31..4ca4fbd227bc 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -19,9 +19,9 @@ #include #include #include -#include +#include #include -#include +#include #include #include #include diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c index 42fe2b7afef2..ce335578b759 100644 --- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c +++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c index 43ff170ff1c2..426bf1a72ba6 100644 --- a/drivers/crypto/xilinx/zynqmp-sha.c +++ b/drivers/crypto/xilinx/zynqmp-sha.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #define ZYNQMP_DMA_BIT_MASK 32U From fab9516f02b418e37d3cde6c21c316085262aece Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Mon, 14 Aug 2023 16:52:30 +0100 Subject: [PATCH 146/149] crypto: qat - fix crypto capability detection for 4xxx When extending the capability detection logic for 4xxx devices the SMx algorithms were accidentally missed. Enable these SMx capabilities by default for QAT GEN4 devices. Check for device variants where the SMx algorithms are explicitly disabled by the GEN4 hardware. This is indicated in fusectl1 register. Mask out SM3 and SM4 based on a bit specific to those algorithms. Mask out SM2 if the PKE slice is not present. 
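[Editor's illustration of the masking rule described above, not the driver code itself: a set bit in fusectl1 means the corresponding slice is fused off in this SKU, so the matching capability flags are cleared before being advertised. The identifiers mirror those used in the patch below; the standalone variables are for illustration only.]

	/* Capabilities enabled by default on this generation. */
	u32 capabilities_sym = ICP_ACCEL_CAPABILITIES_SM3 |
			       ICP_ACCEL_CAPABILITIES_SM4;
	u32 capabilities_asym = ICP_ACCEL_CAPABILITIES_SM2;

	/* A set fuse bit means the feature is OFF in this SKU. */
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE)
		capabilities_sym &= ~(ICP_ACCEL_CAPABILITIES_SM3 |
				      ICP_ACCEL_CAPABILITIES_SM4);

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE)
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;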
Fixes: 4b44d28c715d ("crypto: qat - extend crypto capability detection for 4xxx") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Reviewed-by: Fiona Trahe Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 9 +++++++++ drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 5 ++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 268a1f7694fc..dd4464b7e00b 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -225,6 +225,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_CHACHA_POLY | ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_SM3 | + ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_AES_V2; /* A set bit in fusectl1 means the feature is OFF in this SKU */ @@ -248,12 +250,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } + if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; + } + capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT; if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) { capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; } diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index a65059e56248..0c8883e2ccc6 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -97,7 +97,10 @@ enum icp_qat_capabilities_mask { ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15), ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16), ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17), - /* Bits 18-21 are currently reserved */ + ICP_ACCEL_CAPABILITIES_SM2 = BIT(18), + ICP_ACCEL_CAPABILITIES_SM3 = BIT(19), + ICP_ACCEL_CAPABILITIES_SM4 = BIT(20), + /* Bit 21 is currently reserved */ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22), ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23), ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24), From ef5b52a631f8c18353e80ccab8408b963305510c Mon Sep 17 00:00:00 2001 From: Thore Sommer Date: Tue, 15 Aug 2023 14:29:42 +0300 Subject: [PATCH 147/149] X.509: if signature is unsupported skip validation When the hash algorithm for the signature is not available the digest size is 0 and the signature in the certificate is marked as unsupported. When validating a self-signed certificate, this needs to be checked, because otherwise trying to validate the signature will fail with an warning: Loading compiled-in X.509 certificates WARNING: CPU: 0 PID: 1 at crypto/rsa-pkcs1pad.c:537 \ pkcs1pad_verify+0x46/0x12c ... 
Problem loading in-kernel X.509 certificate (-22) Signed-off-by: Thore Sommer Cc: stable@vger.kernel.org # v4.7+ Fixes: 6c2dc5ae4ab7 ("X.509: Extract signature digest and make self-signed cert checks earlier") Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/x509_public_key.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6fdfc82e23a8..7c71db3ac23d 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -130,6 +130,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert) goto out; } + if (cert->unsupported_sig) { + ret = 0; + goto out; + } + ret = public_key_verify_signature(cert->pub, cert->sig); if (ret < 0) { if (ret == -ENOPKG) { From 9687daf785c035b4b6f73c998e6ee75bb0697c7e Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Thu, 17 Aug 2023 21:48:54 +0800 Subject: [PATCH 148/149] crypto: chelsio - Remove unused declarations These declarations are not implemented now, remove them. Signed-off-by: Yue Haibing Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_core.h | 1 - drivers/crypto/chelsio/chcr_crypto.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index f7c8bb95a71b..5e9d568131fe 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h @@ -133,7 +133,6 @@ int start_crypto(void); int stop_crypto(void); int chcr_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *pgl); -int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev); int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, int err); #endif /* __CHCR_CORE_H__ */ diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 7f88ddb08631..1d693b8436e6 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -344,7 +344,6 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req, struct cpl_rx_phys_dsgl *phys_cpl, struct cipher_wr_param *wrparam, unsigned short qid); -int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip); void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx, struct hash_wr_param *param); int chcr_hash_dma_map(struct device *dev, struct ahash_request *req); From 85b9bf9a514d991fcecb118d0a8a35e754ff9265 Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Thu, 24 Aug 2023 13:33:20 +0200 Subject: [PATCH 149/149] Revert "dt-bindings: crypto: qcom,prng: Add SM8450" This reverts commit b9296bb41275 ("dt-bindings: crypto: qcom,prng: Add SM8450"), since the RNG HW on the SM8450 SoC is in fact a True Random Number Generator, a more appropriate compatible should be instead as reported at [1]. 
[1] https://lore.kernel.org/all/20230818161720.3644424-1-quic_omprsing@quicinc.com/ Suggested-by: Om Prakash Singh Suggested-by: Konrad Dybcio Signed-off-by: Neil Armstrong Reviewed-by: Konrad Dybcio Signed-off-by: Herbert Xu --- .../devicetree/bindings/crypto/qcom,prng.yaml | 24 ++++--------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml index 36b0ebd9a44b..bb42f4588b40 100644 --- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml @@ -11,13 +11,9 @@ maintainers: properties: compatible: - oneOf: - - enum: - - qcom,prng # 8916 etc. - - qcom,prng-ee # 8996 and later using EE - - items: - - const: qcom,sm8450-prng-ee - - const: qcom,prng-ee + enum: + - qcom,prng # 8916 etc. + - qcom,prng-ee # 8996 and later using EE reg: maxItems: 1 @@ -32,18 +28,8 @@ properties: required: - compatible - reg - -allOf: - - if: - not: - properties: - compatible: - contains: - const: qcom,sm8450-prng-ee - then: - required: - - clocks - - clock-names + - clocks + - clock-names additionalProperties: false