From 4ac444ecf72f8f0dbcec5b410c38965411687c3f Mon Sep 17 00:00:00 2001 From: Julian Zhu Date: Mon, 27 Oct 2025 21:37:56 +0800 Subject: [PATCH] Backport SHA2 assembly optimization for RISC-V Signed-off-by: Julian Zhu --- ...rt-RISC-V-SHA2-assembly-optimization.patch | 1700 +++++++++++++++++ openssl.spec | 6 +- 2 files changed, 1705 insertions(+), 1 deletion(-) create mode 100644 Backport-RISC-V-SHA2-assembly-optimization.patch diff --git a/Backport-RISC-V-SHA2-assembly-optimization.patch b/Backport-RISC-V-SHA2-assembly-optimization.patch new file mode 100644 index 0000000..5c2fe00 --- /dev/null +++ b/Backport-RISC-V-SHA2-assembly-optimization.patch @@ -0,0 +1,1700 @@ +From 3f9b0326855d75d490c3771218a98ea218d83d4b Mon Sep 17 00:00:00 2001 +From: Julian Zhu +Date: Sat, 25 Oct 2025 02:25:47 +0800 +Subject: [PATCH] RISC-V: Backport SHA2 assembly optimization + +Signed-off-by: Julian Zhu +--- + crypto/sha/asm/sha256-riscv64-zbb.pl | 467 ++++++++++++++++++ + .../sha256-riscv64-zvkb-zvknha_or_zvknhb.pl | 316 ++++++++++++ + crypto/sha/asm/sha512-riscv64-zbb.pl | 436 ++++++++++++++++ + crypto/sha/asm/sha512-riscv64-zvkb-zvknhb.pl | 264 ++++++++++ + crypto/sha/build.info | 15 + + crypto/sha/sha256.c | 12 +- + crypto/sha/sha512.c | 10 +- + crypto/sha/sha_riscv.c | 49 ++ + 8 files changed, 1566 insertions(+), 3 deletions(-) + create mode 100644 crypto/sha/asm/sha256-riscv64-zbb.pl + create mode 100644 crypto/sha/asm/sha256-riscv64-zvkb-zvknha_or_zvknhb.pl + create mode 100644 crypto/sha/asm/sha512-riscv64-zbb.pl + create mode 100644 crypto/sha/asm/sha512-riscv64-zvkb-zvknhb.pl + create mode 100644 crypto/sha/sha_riscv.c + +diff --git a/crypto/sha/asm/sha256-riscv64-zbb.pl b/crypto/sha/asm/sha256-riscv64-zbb.pl +new file mode 100644 +index 0000000000..0027f0297c +--- /dev/null ++++ b/crypto/sha/asm/sha256-riscv64-zbb.pl +@@ -0,0 +1,467 @@ ++#! /usr/bin/env perl ++# This file is dual-licensed, meaning that you can use it under your ++# choice of either of the following two licenses: ++# ++# Copyright 2025 The OpenSSL Project Authors. All Rights Reserved. ++# ++# Licensed under the Apache License 2.0 (the "License"). You can obtain ++# a copy in the file LICENSE in the source distribution or at ++# https://www.openssl.org/source/license.html ++# ++# or ++# ++# Copyright (c) 2025, Julian Zhu ++# All rights reserved. ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions ++# are met: ++# 1. Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# 2. Redistributions in binary form must reproduce the above copyright ++# notice, this list of conditions and the following disclaimer in the ++# documentation and/or other materials provided with the distribution. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# The generated code of this file depends on the following RISC-V extensions: ++# - RV64I ++# Optional: ++# - RISC-V Basic Bit-manipulation extension ('Zbb') ++ ++use strict; ++use warnings; ++ ++use FindBin qw($Bin); ++use lib "$Bin"; ++use lib "$Bin/../../perlasm"; ++use riscv; ++ ++# $output is the last argument if it looks like a file (it has an extension) ++# $flavour is the first argument if it doesn't look like a file ++my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef; ++my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef; ++ ++my $use_zbb = $flavour && $flavour =~ /zbb/i ? 1 : 0; ++my $isaext = "_" . ( $use_zbb ? "zbb" : "riscv64" ); ++ ++$output and open STDOUT,">$output"; ++ ++my $code=<<___; ++.text ++___ ++ ++my $K256 = "K256"; ++ ++# Function arguments ++my ($INP, $LEN, $ADDR) = ("a1", "a2", "sp"); ++my ($KT, $T1, $T2, $T3, $T4, $T5, $T6, $T7, $T8) = ("t0", "t1", "t2", "t3", "t4", "t5", "t6", "a3", "a4"); ++my ($A, $B, $C, $D ,$E ,$F ,$G ,$H) = ("s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9"); ++ ++sub MSGSCHEDULE0 { ++ my ( ++ $index, ++ ) = @_; ++ if ($use_zbb) { ++ my $code=<<___; ++ lw $T1, (4*$index+0)($INP) ++ @{[rev8 $T1, $T1]} # rev8 $T1, $T1 ++ srli $T1, $T1, 32 ++ sw $T1, 4*$index($ADDR) ++___ ++ return $code; ++ } else { ++ my $code=<<___; ++ lbu $T1, (4*$index+0)($INP) ++ lbu $T2, (4*$index+1)($INP) ++ lbu $T3, (4*$index+2)($INP) ++ lbu $T4, (4*$index+3)($INP) ++ slliw $T1, $T1, 24 ++ slliw $T2, $T2, 16 ++ or $T1, $T1, $T2 ++ slliw $T3, $T3, 8 ++ or $T1, $T1, $T3 ++ or $T1, $T1, $T4 ++ sw $T1, 4*$index($ADDR) ++___ ++ return $code; ++ } ++} ++ ++sub MSGSCHEDULE1 { ++ my ( ++ $INDEX, ++ ) = @_; ++ my $code=<<___; ++ lw $T1, (($INDEX-2)&0x0f)*4($ADDR) ++ lw $T2, (($INDEX-15)&0x0f)*4($ADDR) ++ lw $T3, (($INDEX-7)&0x0f)*4($ADDR) ++ lw $T4, ($INDEX&0x0f)*4($ADDR) ++___ ++ if ($use_zbb) { ++ my $ror_part = <<___; ++ @{[roriw $T5, $T1, 17]} # roriw $T5, $T1, 17 ++ @{[roriw $T6, $T1, 19]} # roriw $T6, $T1, 19 ++___ ++ $code .= $ror_part; ++ } else { ++ my $ror_part = <<___; ++ @{[roriw_rv64i $T5, $T1, $T7, $T8, 17]} ++ @{[roriw_rv64i $T6, $T1, $T7, $T8, 19]} ++___ ++ $code .= $ror_part; ++ } ++ $code .= <<___; ++ srliw $T1, $T1, 10 ++ xor $T1, $T1, $T5 ++ xor $T1, $T1, $T6 ++ addw $T1, $T1, $T3 ++___ ++ if ($use_zbb) { ++ my $ror_part = <<___; ++ @{[roriw $T5, $T2, 7]} # roriw $T5, $T2, 7 ++ @{[roriw $T6, $T2, 18]} # roriw $T6, $T2, 18 ++___ ++ $code .= $ror_part; ++ } else { ++ my $ror_part = <<___; ++ @{[roriw_rv64i $T5, $T2, $T7, $T8, 7]} ++ @{[roriw_rv64i $T6, $T2, $T7, $T8, 18]} ++___ ++ $code .= $ror_part; ++ } ++ $code .= <<___; ++ srliw $T2, $T2, 3 ++ xor $T2, $T2, $T5 ++ xor $T2, $T2, $T6 ++ addw $T1, $T1, $T2 ++ addw $T1, $T1, $T4 ++ sw $T1, 4*($INDEX&0x0f)($ADDR) ++___ ++ ++ return $code; ++} ++ ++sub sha256_T1 { ++ my ( ++ $INDEX, $e, $f, $g, $h, ++ ) = @_; ++ my $code=<<___; ++ lw $T4, 4*$INDEX($KT) ++ addw $h, $h, $T1 ++ addw $h, $h, $T4 ++___ ++ if ($use_zbb) { ++ my $ror_part = <<___; ++ 
@{[roriw $T2, $e, 6]} # roriw $T2, $e, 6 ++ @{[roriw $T3, $e, 11]} # roriw $T3, $e, 11 ++ @{[roriw $T4, $e, 25]} # roriw $T4, $e, 25 ++___ ++ $code .= $ror_part; ++ } else { ++ my $ror_part = <<___; ++ @{[roriw_rv64i $T2, $e, $T7, $T8, 6]} ++ @{[roriw_rv64i $T3, $e, $T7, $T8, 11]} ++ @{[roriw_rv64i $T4, $e, $T7, $T8, 25]} ++___ ++ $code .= $ror_part; ++ } ++ $code .= <<___; ++ xor $T2, $T2, $T3 ++ xor $T1, $f, $g ++ xor $T2, $T2, $T4 ++ and $T1, $T1, $e ++ addw $h, $h, $T2 ++ xor $T1, $T1, $g ++ addw $T1, $T1, $h ++___ ++ ++ return $code; ++} ++ ++sub sha256_T2 { ++ my ( ++ $a, $b, $c, ++ ) = @_; ++ my $code=<<___; ++ # Sum0 ++___ ++ if ($use_zbb) { ++ my $ror_part = <<___; ++ @{[roriw $T2, $a, 2]} # roriw $T2, $a, 2 ++ @{[roriw $T3, $a, 13]} # roriw $T3, $a, 13 ++ @{[roriw $T4, $a, 22]} # roriw $T4, $a, 22 ++___ ++ $code .= $ror_part; ++ } else { ++ my $ror_part = <<___; ++ @{[roriw_rv64i $T2, $a, $T7, $T8, 2]} ++ @{[roriw_rv64i $T3, $a, $T7, $T8, 13]} ++ @{[roriw_rv64i $T4, $a, $T7, $T8, 22]} ++___ ++ $code .= $ror_part; ++ } ++ $code .= <<___; ++ xor $T2, $T2, $T3 ++ xor $T2, $T2, $T4 ++ # Maj ++ xor $T4, $b, $c ++ and $T3, $b, $c ++ and $T4, $T4, $a ++ xor $T4, $T4, $T3 ++ # T2 ++ addw $T2, $T2, $T4 ++___ ++ ++ return $code; ++} ++ ++sub SHA256ROUND { ++ my ( ++ $INDEX, $a, $b, $c, $d, $e, $f, $g, $h ++ ) = @_; ++ my $code=<<___; ++ @{[sha256_T1 $INDEX, $e, $f, $g, $h]} ++ @{[sha256_T2 $a, $b, $c]} ++ addw $d, $d, $T1 ++ addw $h, $T2, $T1 ++___ ++ ++ return $code; ++} ++ ++sub SHA256ROUND0 { ++ my ( ++ $INDEX, $a, $b, $c, $d, $e, $f, $g, $h ++ ) = @_; ++ my $code=<<___; ++ @{[MSGSCHEDULE0 $INDEX]} ++ @{[SHA256ROUND $INDEX, $a, $b, $c, $d, $e, $f, $g, $h]} ++___ ++ ++ return $code; ++} ++ ++sub SHA256ROUND1 { ++ my ( ++ $INDEX, $a, $b, $c, $d, $e, $f, $g, $h ++ ) = @_; ++ my $code=<<___; ++ @{[MSGSCHEDULE1 $INDEX]} ++ @{[SHA256ROUND $INDEX, $a, $b, $c, $d, $e, $f, $g, $h]} ++___ ++ ++ return $code; ++} ++ ++################################################################################ ++# void sha256_block_data_order@{[$isaext]}(void *c, const void *p, size_t len) ++$code .= <<___; ++.p2align 3 ++.globl sha256_block_data_order@{[$isaext]} ++.type sha256_block_data_order@{[$isaext]},\@function ++sha256_block_data_order@{[$isaext]}: ++ ++ addi sp, sp, -96 ++ ++ sd s0, 0(sp) ++ sd s1, 8(sp) ++ sd s2, 16(sp) ++ sd s3, 24(sp) ++ sd s4, 32(sp) ++ sd s5, 40(sp) ++ sd s6, 48(sp) ++ sd s7, 56(sp) ++ sd s8, 64(sp) ++ sd s9, 72(sp) ++ sd s10, 80(sp) ++ sd s11, 88(sp) ++ ++ addi sp, sp, -64 ++ ++ la $KT, $K256 ++ ++ # load ctx ++ lw $A, 0(a0) ++ lw $B, 4(a0) ++ lw $C, 8(a0) ++ lw $D, 12(a0) ++ lw $E, 16(a0) ++ lw $F, 20(a0) ++ lw $G, 24(a0) ++ lw $H, 28(a0) ++ ++L_round_loop: ++ # Decrement length by 1 ++ addi $LEN, $LEN, -1 ++ ++ @{[SHA256ROUND0 0, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA256ROUND0 1, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA256ROUND0 2, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA256ROUND0 3, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA256ROUND0 4, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA256ROUND0 5, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA256ROUND0 6, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA256ROUND0 7, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA256ROUND0 8, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA256ROUND0 9, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA256ROUND0 10, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA256ROUND0 11, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA256ROUND0 12, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA256ROUND0 13, $D, $E, $F, $G, $H, $A, $B, $C]} 
++ @{[SHA256ROUND0 14, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA256ROUND0 15, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA256ROUND1 16, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA256ROUND1 17, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA256ROUND1 18, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA256ROUND1 19, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA256ROUND1 20, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA256ROUND1 21, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA256ROUND1 22, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA256ROUND1 23, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA256ROUND1 24, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA256ROUND1 25, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA256ROUND1 26, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA256ROUND1 27, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA256ROUND1 28, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA256ROUND1 29, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA256ROUND1 30, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA256ROUND1 31, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA256ROUND1 32, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA256ROUND1 33, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA256ROUND1 34, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA256ROUND1 35, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA256ROUND1 36, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA256ROUND1 37, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA256ROUND1 38, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA256ROUND1 39, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA256ROUND1 40, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA256ROUND1 41, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA256ROUND1 42, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA256ROUND1 43, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA256ROUND1 44, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA256ROUND1 45, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA256ROUND1 46, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA256ROUND1 47, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA256ROUND1 48, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA256ROUND1 49, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA256ROUND1 50, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA256ROUND1 51, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA256ROUND1 52, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA256ROUND1 53, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA256ROUND1 54, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA256ROUND1 55, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA256ROUND1 56, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA256ROUND1 57, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA256ROUND1 58, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA256ROUND1 59, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA256ROUND1 60, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA256ROUND1 61, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA256ROUND1 62, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA256ROUND1 63, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ lw $T1, 0(a0) ++ lw $T2, 4(a0) ++ lw $T3, 8(a0) ++ lw $T4, 12(a0) ++ ++ addw $A, $A, $T1 ++ addw $B, $B, $T2 ++ addw $C, $C, $T3 ++ addw $D, $D, $T4 ++ ++ sw $A, 0(a0) ++ sw $B, 4(a0) ++ sw $C, 8(a0) ++ sw $D, 12(a0) ++ ++ lw $T1, 16(a0) ++ lw $T2, 20(a0) ++ lw $T3, 24(a0) ++ lw $T4, 28(a0) ++ ++ addw $E, $E, $T1 ++ addw $F, $F, $T2 ++ addw $G, $G, $T3 ++ addw $H, $H, $T4 ++ ++ sw $E, 16(a0) ++ sw $F, 20(a0) ++ sw $G, 24(a0) ++ sw $H, 28(a0) ++ ++ addi $INP, $INP, 64 ++ ++ bnez $LEN, L_round_loop ++ ++ addi sp, sp, 64 ++ ++ ld s0, 0(sp) ++ ld s1, 8(sp) ++ ld s2, 16(sp) ++ ld s3, 24(sp) ++ ld s4, 32(sp) ++ ld s5, 40(sp) ++ ld s6, 48(sp) ++ ld s7, 56(sp) ++ ld s8, 64(sp) ++ ld s9, 72(sp) ++ ld s10, 80(sp) 
++ ld s11, 88(sp) ++ ++ addi sp, sp, 96 ++ ++ ret ++.size sha256_block_data_order@{[$isaext]},.-sha256_block_data_order@{[$isaext]} ++ ++.section .rodata ++.p2align 3 ++.type $K256,\@object ++$K256: ++ .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 ++ .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 ++ .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 ++ .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 ++ .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc ++ .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da ++ .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 ++ .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 ++ .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 ++ .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 ++ .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 ++ .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 ++ .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 ++ .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 ++ .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 ++ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 ++.size $K256,.-$K256 ++___ ++ ++print $code; ++ ++close STDOUT or die "error closing STDOUT: $!"; +diff --git a/crypto/sha/asm/sha256-riscv64-zvkb-zvknha_or_zvknhb.pl b/crypto/sha/asm/sha256-riscv64-zvkb-zvknha_or_zvknhb.pl +new file mode 100644 +index 0000000000..5e4d6be345 +--- /dev/null ++++ b/crypto/sha/asm/sha256-riscv64-zvkb-zvknha_or_zvknhb.pl +@@ -0,0 +1,316 @@ ++#! /usr/bin/env perl ++# This file is dual-licensed, meaning that you can use it under your ++# choice of either of the following two licenses: ++# ++# Copyright 2023 The OpenSSL Project Authors. All Rights Reserved. ++# ++# Licensed under the Apache License 2.0 (the "License"). You can obtain ++# a copy in the file LICENSE in the source distribution or at ++# https://www.openssl.org/source/license.html ++# ++# or ++# ++# Copyright (c) 2023, Christoph Müllner ++# Copyright (c) 2023, Phoebe Chen ++# All rights reserved. ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions ++# are met: ++# 1. Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# 2. Redistributions in binary form must reproduce the above copyright ++# notice, this list of conditions and the following disclaimer in the ++# documentation and/or other materials provided with the distribution. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ ++# The generated code of this file depends on the following RISC-V extensions: ++# - RV64I ++# - RISC-V Vector ('V') with VLEN >= 128 ++# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb') ++# - RISC-V Vector SHA-2 Secure Hash extension ('Zvknha' or 'Zvknhb') ++ ++use strict; ++use warnings; ++ ++use FindBin qw($Bin); ++use lib "$Bin"; ++use lib "$Bin/../../perlasm"; ++use riscv; ++ ++# $output is the last argument if it looks like a file (it has an extension) ++# $flavour is the first argument if it doesn't look like a file ++my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef; ++my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef; ++ ++$output and open STDOUT,">$output"; ++ ++my $code=<<___; ++.text ++___ ++ ++my ($V0, $V1, $V2, $V3, $V4, $V5, $V6, $V7, ++ $V8, $V9, $V10, $V11, $V12, $V13, $V14, $V15, ++ $V16, $V17, $V18, $V19, $V20, $V21, $V22, $V23, ++ $V24, $V25, $V26, $V27, $V28, $V29, $V30, $V31, ++) = map("v$_",(0..31)); ++ ++my $K256 = "K256"; ++ ++# Function arguments ++my ($H, $INP, $LEN, $KT, $H2, $INDEX_PATTERN) = ("a0", "a1", "a2", "a3", "t3", "t4"); ++ ++sub sha_256_load_constant { ++ my $code=<<___; ++ la $KT, $K256 # Load round constants K256 ++ @{[vle32_v $V10, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V11, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V12, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V13, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V14, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V15, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V16, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V17, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V18, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V19, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V20, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V21, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V22, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V23, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V24, $KT]} ++ addi $KT, $KT, 16 ++ @{[vle32_v $V25, $KT]} ++___ ++ ++ return $code; ++} ++ ++################################################################################ ++# void sha256_block_data_order_zvkb_zvknha_or_zvknhb(void *c, const void *p, size_t len) ++$code .= <<___; ++.p2align 2 ++.globl sha256_block_data_order_zvkb_zvknha_or_zvknhb ++.type sha256_block_data_order_zvkb_zvknha_or_zvknhb,\@function ++sha256_block_data_order_zvkb_zvknha_or_zvknhb: ++ @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]} ++ ++ @{[sha_256_load_constant]} ++ ++ # H is stored as {a,b,c,d},{e,f,g,h}, but we need {f,e,b,a},{h,g,d,c} ++ # The dst vtype is e32m1 and the index vtype is e8mf4. ++ # We use index-load with the following index pattern at v26. ++ # i8 index: ++ # 20, 16, 4, 0 ++ # Instead of setting the i8 index, we could use a single 32bit ++ # little-endian value to cover the 4xi8 index. ++ # i32 value: ++ # 0x 00 04 10 14 ++ li $INDEX_PATTERN, 0x00041014 ++ @{[vsetivli "zero", 1, "e32", "m1", "ta", "ma"]} ++ @{[vmv_v_x $V26, $INDEX_PATTERN]} ++ ++ addi $H2, $H, 8 ++ ++ # Use index-load to get {f,e,b,a},{h,g,d,c} ++ @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]} ++ @{[vluxei8_v $V6, $H, $V26]} ++ @{[vluxei8_v $V7, $H2, $V26]} ++ ++ # Setup v0 mask for the vmerge to replace the first word (idx==0) in key-scheduling. ++ # The AVL is 4 in SHA, so we could use a single e8(8 element masking) for masking. 
++ @{[vsetivli "zero", 1, "e8", "m1", "ta", "ma"]} ++ @{[vmv_v_i $V0, 0x01]} ++ ++ @{[vsetivli "zero", 4, "e32", "m1", "ta", "ma"]} ++ ++L_round_loop: ++ # Decrement length by 1 ++ add $LEN, $LEN, -1 ++ ++ # Keep the current state as we need it later: H' = H+{a',b',c',...,h'}. ++ @{[vmv_v_v $V30, $V6]} ++ @{[vmv_v_v $V31, $V7]} ++ ++ # Load the 512-bits of the message block in v1-v4 and perform ++ # an endian swap on each 4 bytes element. ++ @{[vle32_v $V1, $INP]} ++ @{[vrev8_v $V1, $V1]} ++ add $INP, $INP, 16 ++ @{[vle32_v $V2, $INP]} ++ @{[vrev8_v $V2, $V2]} ++ add $INP, $INP, 16 ++ @{[vle32_v $V3, $INP]} ++ @{[vrev8_v $V3, $V3]} ++ add $INP, $INP, 16 ++ @{[vle32_v $V4, $INP]} ++ @{[vrev8_v $V4, $V4]} ++ add $INP, $INP, 16 ++ ++ # Quad-round 0 (+0, Wt from oldest to newest in v1->v2->v3->v4) ++ @{[vadd_vv $V5, $V10, $V1]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V3, $V2, $V0]} ++ @{[vsha2ms_vv $V1, $V5, $V4]} # Generate W[19:16] ++ ++ # Quad-round 1 (+1, v2->v3->v4->v1) ++ @{[vadd_vv $V5, $V11, $V2]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V4, $V3, $V0]} ++ @{[vsha2ms_vv $V2, $V5, $V1]} # Generate W[23:20] ++ ++ # Quad-round 2 (+2, v3->v4->v1->v2) ++ @{[vadd_vv $V5, $V12, $V3]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V1, $V4, $V0]} ++ @{[vsha2ms_vv $V3, $V5, $V2]} # Generate W[27:24] ++ ++ # Quad-round 3 (+3, v4->v1->v2->v3) ++ @{[vadd_vv $V5, $V13, $V4]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V2, $V1, $V0]} ++ @{[vsha2ms_vv $V4, $V5, $V3]} # Generate W[31:28] ++ ++ # Quad-round 4 (+0, v1->v2->v3->v4) ++ @{[vadd_vv $V5, $V14, $V1]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V3, $V2, $V0]} ++ @{[vsha2ms_vv $V1, $V5, $V4]} # Generate W[35:32] ++ ++ # Quad-round 5 (+1, v2->v3->v4->v1) ++ @{[vadd_vv $V5, $V15, $V2]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V4, $V3, $V0]} ++ @{[vsha2ms_vv $V2, $V5, $V1]} # Generate W[39:36] ++ ++ # Quad-round 6 (+2, v3->v4->v1->v2) ++ @{[vadd_vv $V5, $V16, $V3]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V1, $V4, $V0]} ++ @{[vsha2ms_vv $V3, $V5, $V2]} # Generate W[43:40] ++ ++ # Quad-round 7 (+3, v4->v1->v2->v3) ++ @{[vadd_vv $V5, $V17, $V4]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V2, $V1, $V0]} ++ @{[vsha2ms_vv $V4, $V5, $V3]} # Generate W[47:44] ++ ++ # Quad-round 8 (+0, v1->v2->v3->v4) ++ @{[vadd_vv $V5, $V18, $V1]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V3, $V2, $V0]} ++ @{[vsha2ms_vv $V1, $V5, $V4]} # Generate W[51:48] ++ ++ # Quad-round 9 (+1, v2->v3->v4->v1) ++ @{[vadd_vv $V5, $V19, $V2]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V4, $V3, $V0]} ++ @{[vsha2ms_vv $V2, $V5, $V1]} # Generate W[55:52] ++ ++ # Quad-round 10 (+2, v3->v4->v1->v2) ++ @{[vadd_vv $V5, $V20, $V3]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V1, $V4, $V0]} ++ @{[vsha2ms_vv $V3, $V5, $V2]} # Generate W[59:56] ++ ++ # Quad-round 11 (+3, v4->v1->v2->v3) ++ @{[vadd_vv $V5, $V21, $V4]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ @{[vmerge_vvm $V5, $V2, $V1, $V0]} ++ @{[vsha2ms_vv $V4, $V5, $V3]} # Generate W[63:60] ++ ++ # Quad-round 12 (+0, 
v1->v2->v3->v4) ++ # Note that we stop generating new message schedule words (Wt, v1-13) ++ # as we already generated all the words we end up consuming (i.e., W[63:60]). ++ @{[vadd_vv $V5, $V22, $V1]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ ++ # Quad-round 13 (+1, v2->v3->v4->v1) ++ @{[vadd_vv $V5, $V23, $V2]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ ++ # Quad-round 14 (+2, v3->v4->v1->v2) ++ @{[vadd_vv $V5, $V24, $V3]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ ++ # Quad-round 15 (+3, v4->v1->v2->v3) ++ @{[vadd_vv $V5, $V25, $V4]} ++ @{[vsha2cl_vv $V7, $V6, $V5]} ++ @{[vsha2ch_vv $V6, $V7, $V5]} ++ ++ # H' = H+{a',b',c',...,h'} ++ @{[vadd_vv $V6, $V30, $V6]} ++ @{[vadd_vv $V7, $V31, $V7]} ++ bnez $LEN, L_round_loop ++ ++ # Store {f,e,b,a},{h,g,d,c} back to {a,b,c,d},{e,f,g,h}. ++ @{[vsuxei8_v $V6, $H, $V26]} ++ @{[vsuxei8_v $V7, $H2, $V26]} ++ ++ ret ++.size sha256_block_data_order_zvkb_zvknha_or_zvknhb,.-sha256_block_data_order_zvkb_zvknha_or_zvknhb ++ ++.p2align 2 ++.type $K256,\@object ++$K256: ++ .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 ++ .word 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 ++ .word 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 ++ .word 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 ++ .word 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc ++ .word 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da ++ .word 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 ++ .word 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 ++ .word 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 ++ .word 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 ++ .word 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 ++ .word 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 ++ .word 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 ++ .word 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 ++ .word 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 ++ .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 ++.size $K256,.-$K256 ++___ ++ ++print $code; ++ ++close STDOUT or die "error closing STDOUT: $!"; +diff --git a/crypto/sha/asm/sha512-riscv64-zbb.pl b/crypto/sha/asm/sha512-riscv64-zbb.pl +new file mode 100644 +index 0000000000..6c86e1d068 +--- /dev/null ++++ b/crypto/sha/asm/sha512-riscv64-zbb.pl +@@ -0,0 +1,436 @@ ++#! /usr/bin/env perl ++# This file is dual-licensed, meaning that you can use it under your ++# choice of either of the following two licenses: ++# ++# Copyright 2025 The OpenSSL Project Authors. All Rights Reserved. ++# ++# Licensed under the Apache License 2.0 (the "License"). You can obtain ++# a copy in the file LICENSE in the source distribution or at ++# https://www.openssl.org/source/license.html ++# ++# or ++# ++# Copyright (c) 2025, Julian Zhu ++# All rights reserved. ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions ++# are met: ++# 1. Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# 2. Redistributions in binary form must reproduce the above copyright ++# notice, this list of conditions and the following disclaimer in the ++# documentation and/or other materials provided with the distribution. 
++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# The generated code of this file depends on the following RISC-V extensions: ++# - RV64I ++# - RISC-V Basic Bit-manipulation extension ('Zbb') ++ ++use strict; ++use warnings; ++ ++use FindBin qw($Bin); ++use lib "$Bin"; ++use lib "$Bin/../../perlasm"; ++use riscv; ++ ++# $output is the last argument if it looks like a file (it has an extension) ++# $flavour is the first argument if it doesn't look like a file ++my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef; ++my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef; ++ ++$output and open STDOUT,">$output"; ++ ++my $code=<<___; ++.text ++___ ++ ++my $K512 = "K512"; ++ ++# Function arguments ++my ($INP, $LEN, $ADDR) = ("a1", "a2", "sp"); ++my ($KT, $T1, $T2, $T3, $T4, $T5, $T6) = ("t0", "t1", "t2", "t3", "t4", "t5", "t6"); ++my ($A, $B, $C, $D ,$E ,$F ,$G ,$H) = ("s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9"); ++ ++sub MSGSCHEDULE0 { ++ my ( ++ $index, ++ ) = @_; ++ my $code=<<___; ++ ld $T1, (8*$index+0)($INP) ++ @{[rev8 $T1, $T1]} ++ sd $T1, 8*$index($ADDR) ++___ ++ ++ return $code; ++} ++ ++sub MSGSCHEDULE1 { ++ my ( ++ $INDEX, ++ ) = @_; ++ my $code=<<___; ++ ld $T1, (($INDEX-2)&0x0f)*8($ADDR) ++ ld $T2, (($INDEX-15)&0x0f)*8($ADDR) ++ ld $T3, (($INDEX-7)&0x0f)*8($ADDR) ++ ld $T4, ($INDEX&0x0f)*8($ADDR) ++ @{[rori $T5, $T1, 19]} ++ @{[rori $T6, $T1, 61]} ++ srli $T1, $T1, 6 ++ xor $T1, $T1, $T5 ++ xor $T1, $T1, $T6 ++ add $T1, $T1, $T3 ++ @{[rori $T5, $T2, 1]} ++ @{[rori $T6, $T2, 8]} ++ srli $T2, $T2, 7 ++ xor $T2, $T2, $T5 ++ xor $T2, $T2, $T6 ++ add $T1, $T1, $T2 ++ add $T1, $T1, $T4 ++ sd $T1, 8*($INDEX&0x0f)($ADDR) ++___ ++ ++ return $code; ++} ++ ++sub sha512_T1 { ++ my ( ++ $INDEX, $e, $f, $g, $h, ++ ) = @_; ++ my $code=<<___; ++ ld $T4, 8*$INDEX($KT) ++ add $h, $h, $T1 ++ add $h, $h, $T4 ++ @{[rori $T2, $e, 14]} ++ @{[rori $T3, $e, 18]} ++ @{[rori $T4, $e, 41]} ++ xor $T2, $T2, $T3 ++ xor $T1, $f, $g ++ xor $T2, $T2, $T4 ++ and $T1, $T1, $e ++ add $h, $h, $T2 ++ xor $T1, $T1, $g ++ add $T1, $T1, $h ++___ ++ ++ return $code; ++} ++ ++sub sha512_T2 { ++ my ( ++ $a, $b, $c, ++ ) = @_; ++ my $code=<<___; ++ # Sigma0 ++ @{[rori $T2, $a, 28]} ++ @{[rori $T3, $a, 34]} ++ @{[rori $T4, $a, 39]} ++ xor $T2, $T2, $T3 ++ # Maj ++ xor $T5, $b, $c ++ and $T3, $b, $c ++ and $T5, $T5, $a ++ xor $T2, $T2, $T4 ++ xor $T3, $T3, $T5 ++ # T2 ++ add $T2, $T2, $T3 ++___ ++ ++ return $code; ++} ++ ++sub SHA512ROUND { ++ my ( ++ $INDEX, $a, $b, $c, $d, $e, $f, $g, $h ++ ) = @_; ++ my $code=<<___; ++ @{[sha512_T1 $INDEX, $e, $f, $g, $h]} ++ @{[sha512_T2 $a, $b, $c]} ++ add $d, $d, $T1 ++ add $h, $T2, $T1 ++___ ++ ++ return $code; ++} ++ ++sub SHA512ROUND0 { ++ my ( ++ $INDEX, $a, $b, $c, $d, $e, $f, $g, $h ++ ) = @_; ++ my 
$code=<<___; ++ @{[MSGSCHEDULE0 $INDEX]} ++ @{[SHA512ROUND $INDEX, $a, $b, $c, $d, $e, $f, $g, $h]} ++___ ++ ++ return $code; ++} ++ ++sub SHA512ROUND1 { ++ my ( ++ $INDEX, $a, $b, $c, $d, $e, $f, $g, $h ++ ) = @_; ++ my $code=<<___; ++ @{[MSGSCHEDULE1 $INDEX]} ++ @{[SHA512ROUND $INDEX, $a, $b, $c, $d, $e, $f, $g, $h]} ++___ ++ ++ return $code; ++} ++ ++################################################################################ ++# void sha512_block_data_order_zbb(void *c, const void *p, size_t len) ++$code .= <<___; ++.p2align 3 ++.globl sha512_block_data_order_zbb ++.type sha512_block_data_order_zbb,\@function ++sha512_block_data_order_zbb: ++ ++ addi sp, sp, -96 ++ ++ sd s0, 0(sp) ++ sd s1, 8(sp) ++ sd s2, 16(sp) ++ sd s3, 24(sp) ++ sd s4, 32(sp) ++ sd s5, 40(sp) ++ sd s6, 48(sp) ++ sd s7, 56(sp) ++ sd s8, 64(sp) ++ sd s9, 72(sp) ++ sd s10, 80(sp) ++ sd s11, 88(sp) ++ ++ addi sp, sp, -128 ++ ++ la $KT, $K512 ++ ++ # load ctx ++ ld $A, 0(a0) ++ ld $B, 8(a0) ++ ld $C, 16(a0) ++ ld $D, 24(a0) ++ ld $E, 32(a0) ++ ld $F, 40(a0) ++ ld $G, 48(a0) ++ ld $H, 56(a0) ++ ++L_round_loop: ++ # Decrement length by 1 ++ addi $LEN, $LEN, -1 ++ ++ @{[SHA512ROUND0 0, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND0 1, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND0 2, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND0 3, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND0 4, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND0 5, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND0 6, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND0 7, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND0 8, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND0 9, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND0 10, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND0 11, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND0 12, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND0 13, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND0 14, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND0 15, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND1 16, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND1 17, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND1 18, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND1 19, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND1 20, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND1 21, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND1 22, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND1 23, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND1 24, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND1 25, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND1 26, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND1 27, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND1 28, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND1 29, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND1 30, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND1 31, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND1 32, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND1 33, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND1 34, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND1 35, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND1 36, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND1 37, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND1 38, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND1 39, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND1 40, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND1 41, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND1 42, $G, 
$H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND1 43, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND1 44, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND1 45, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND1 46, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND1 47, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND1 48, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND1 49, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND1 50, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND1 51, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND1 52, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND1 53, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND1 54, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND1 55, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND1 56, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND1 57, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND1 58, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND1 59, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND1 60, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND1 61, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND1 62, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND1 63, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND1 64, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND1 65, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND1 66, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND1 67, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND1 68, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND1 69, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND1 70, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND1 71, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ @{[SHA512ROUND1 72, $A, $B, $C, $D, $E, $F, $G, $H]} ++ @{[SHA512ROUND1 73, $H, $A, $B, $C, $D, $E, $F, $G]} ++ @{[SHA512ROUND1 74, $G, $H, $A, $B, $C, $D, $E, $F]} ++ @{[SHA512ROUND1 75, $F, $G, $H, $A, $B, $C, $D, $E]} ++ ++ @{[SHA512ROUND1 76, $E, $F, $G, $H, $A, $B, $C, $D]} ++ @{[SHA512ROUND1 77, $D, $E, $F, $G, $H, $A, $B, $C]} ++ @{[SHA512ROUND1 78, $C, $D, $E, $F, $G, $H, $A, $B]} ++ @{[SHA512ROUND1 79, $B, $C, $D, $E, $F, $G, $H, $A]} ++ ++ ld $T1, 0(a0) ++ ld $T2, 8(a0) ++ ld $T3, 16(a0) ++ ld $T4, 24(a0) ++ ++ add $A, $A, $T1 ++ add $B, $B, $T2 ++ add $C, $C, $T3 ++ add $D, $D, $T4 ++ ++ sd $A, 0(a0) ++ sd $B, 8(a0) ++ sd $C, 16(a0) ++ sd $D, 24(a0) ++ ++ ld $T1, 32(a0) ++ ld $T2, 40(a0) ++ ld $T3, 48(a0) ++ ld $T4, 56(a0) ++ ++ add $E, $E, $T1 ++ add $F, $F, $T2 ++ add $G, $G, $T3 ++ add $H, $H, $T4 ++ ++ sd $E, 32(a0) ++ sd $F, 40(a0) ++ sd $G, 48(a0) ++ sd $H, 56(a0) ++ ++ addi $INP, $INP, 128 ++ ++ bnez $LEN, L_round_loop ++ ++ addi sp, sp, 128 ++ ++ ld s0, 0(sp) ++ ld s1, 8(sp) ++ ld s2, 16(sp) ++ ld s3, 24(sp) ++ ld s4, 32(sp) ++ ld s5, 40(sp) ++ ld s6, 48(sp) ++ ld s7, 56(sp) ++ ld s8, 64(sp) ++ ld s9, 72(sp) ++ ld s10, 80(sp) ++ ld s11, 88(sp) ++ ++ addi sp, sp, 96 ++ ++ ret ++.size sha512_block_data_order_zbb,.-sha512_block_data_order_zbb ++ ++.section .rodata ++.p2align 3 ++.type $K512,\@object ++$K512: ++ .dword 0x428a2f98d728ae22, 0x7137449123ef65cd ++ .dword 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc ++ .dword 0x3956c25bf348b538, 0x59f111f1b605d019 ++ .dword 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 ++ .dword 0xd807aa98a3030242, 0x12835b0145706fbe ++ .dword 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 ++ .dword 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 ++ .dword 0x9bdc06a725c71235, 0xc19bf174cf692694 ++ .dword 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 ++ .dword 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 ++ .dword 0x2de92c6f592b0275, 
0x4a7484aa6ea6e483 ++ .dword 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 ++ .dword 0x983e5152ee66dfab, 0xa831c66d2db43210 ++ .dword 0xb00327c898fb213f, 0xbf597fc7beef0ee4 ++ .dword 0xc6e00bf33da88fc2, 0xd5a79147930aa725 ++ .dword 0x06ca6351e003826f, 0x142929670a0e6e70 ++ .dword 0x27b70a8546d22ffc, 0x2e1b21385c26c926 ++ .dword 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df ++ .dword 0x650a73548baf63de, 0x766a0abb3c77b2a8 ++ .dword 0x81c2c92e47edaee6, 0x92722c851482353b ++ .dword 0xa2bfe8a14cf10364, 0xa81a664bbc423001 ++ .dword 0xc24b8b70d0f89791, 0xc76c51a30654be30 ++ .dword 0xd192e819d6ef5218, 0xd69906245565a910 ++ .dword 0xf40e35855771202a, 0x106aa07032bbd1b8 ++ .dword 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 ++ .dword 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 ++ .dword 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb ++ .dword 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 ++ .dword 0x748f82ee5defb2fc, 0x78a5636f43172f60 ++ .dword 0x84c87814a1f0ab72, 0x8cc702081a6439ec ++ .dword 0x90befffa23631e28, 0xa4506cebde82bde9 ++ .dword 0xbef9a3f7b2c67915, 0xc67178f2e372532b ++ .dword 0xca273eceea26619c, 0xd186b8c721c0c207 ++ .dword 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 ++ .dword 0x06f067aa72176fba, 0x0a637dc5a2c898a6 ++ .dword 0x113f9804bef90dae, 0x1b710b35131c471b ++ .dword 0x28db77f523047d84, 0x32caab7b40c72493 ++ .dword 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c ++ .dword 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a ++ .dword 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 ++.size $K512,.-$K512 ++___ ++ ++print $code; ++ ++close STDOUT or die "error closing STDOUT: $!"; +diff --git a/crypto/sha/asm/sha512-riscv64-zvkb-zvknhb.pl b/crypto/sha/asm/sha512-riscv64-zvkb-zvknhb.pl +new file mode 100644 +index 0000000000..c5df987296 +--- /dev/null ++++ b/crypto/sha/asm/sha512-riscv64-zvkb-zvknhb.pl +@@ -0,0 +1,264 @@ ++#! /usr/bin/env perl ++# This file is dual-licensed, meaning that you can use it under your ++# choice of either of the following two licenses: ++# ++# Copyright 2023 The OpenSSL Project Authors. All Rights Reserved. ++# ++# Licensed under the Apache License 2.0 (the "License"). You can obtain ++# a copy in the file LICENSE in the source distribution or at ++# https://www.openssl.org/source/license.html ++# ++# or ++# ++# Copyright (c) 2023, Christoph Müllner ++# Copyright (c) 2023, Phoebe Chen ++# All rights reserved. ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions ++# are met: ++# 1. Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# 2. Redistributions in binary form must reproduce the above copyright ++# notice, this list of conditions and the following disclaimer in the ++# documentation and/or other materials provided with the distribution. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ ++# The generated code of this file depends on the following RISC-V extensions: ++# - RV64I ++# - RISC-V vector ('V') with VLEN >= 128 ++# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb') ++# - RISC-V Vector SHA-2 Secure Hash extension ('Zvknhb') ++ ++use strict; ++use warnings; ++ ++use FindBin qw($Bin); ++use lib "$Bin"; ++use lib "$Bin/../../perlasm"; ++use riscv; ++ ++# $output is the last argument if it looks like a file (it has an extension) ++# $flavour is the first argument if it doesn't look like a file ++my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef; ++my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef; ++ ++$output and open STDOUT,">$output"; ++ ++my $code=<<___; ++.text ++___ ++ ++my ($V0, $V1, $V2, $V3, $V4, $V5, $V6, $V7, ++ $V8, $V9, $V10, $V11, $V12, $V13, $V14, $V15, ++ $V16, $V17, $V18, $V19, $V20, $V21, $V22, $V23, ++ $V24, $V25, $V26, $V27, $V28, $V29, $V30, $V31, ++) = map("v$_",(0..31)); ++ ++my $K512 = "K512"; ++ ++# Function arguments ++my ($H, $INP, $LEN, $KT, $H2, $INDEX_PATTERN) = ("a0", "a1", "a2", "a3", "t3", "t4"); ++ ++################################################################################ ++# void sha512_block_data_order_zvkb_zvknhb(void *c, const void *p, size_t len) ++$code .= <<___; ++.p2align 2 ++.globl sha512_block_data_order_zvkb_zvknhb ++.type sha512_block_data_order_zvkb_zvknhb,\@function ++sha512_block_data_order_zvkb_zvknhb: ++ @{[vsetivli "zero", 4, "e64", "m2", "ta", "ma"]} ++ ++ # H is stored as {a,b,c,d},{e,f,g,h}, but we need {f,e,b,a},{h,g,d,c} ++ # The dst vtype is e64m2 and the index vtype is e8mf4. ++ # We use index-load with the following index pattern at v1. ++ # i8 index: ++ # 40, 32, 8, 0 ++ # Instead of setting the i8 index, we could use a single 32bit ++ # little-endian value to cover the 4xi8 index. ++ # i32 value: ++ # 0x 00 08 20 28 ++ li $INDEX_PATTERN, 0x00082028 ++ @{[vsetivli "zero", 1, "e32", "m1", "ta", "ma"]} ++ @{[vmv_v_x $V1, $INDEX_PATTERN]} ++ ++ addi $H2, $H, 16 ++ ++ # Use index-load to get {f,e,b,a},{h,g,d,c} ++ @{[vsetivli "zero", 4, "e64", "m2", "ta", "ma"]} ++ @{[vluxei8_v $V22, $H, $V1]} ++ @{[vluxei8_v $V24, $H2, $V1]} ++ ++ # Setup v0 mask for the vmerge to replace the first word (idx==0) in key-scheduling. ++ # The AVL is 4 in SHA, so we could use a single e8(8 element masking) for masking. ++ @{[vsetivli "zero", 1, "e8", "m1", "ta", "ma"]} ++ @{[vmv_v_i $V0, 0x01]} ++ ++ @{[vsetivli "zero", 4, "e64", "m2", "ta", "ma"]} ++ ++L_round_loop: ++ # Load round constants K512 ++ la $KT, $K512 ++ ++ # Decrement length by 1 ++ addi $LEN, $LEN, -1 ++ ++ # Keep the current state as we need it later: H' = H+{a',b',c',...,h'}. ++ @{[vmv_v_v $V26, $V22]} ++ @{[vmv_v_v $V28, $V24]} ++ ++ # Load the 1024-bits of the message block in v10-v16 and perform the endian ++ # swap. 
++ @{[vle64_v $V10, $INP]} ++ @{[vrev8_v $V10, $V10]} ++ addi $INP, $INP, 32 ++ @{[vle64_v $V12, $INP]} ++ @{[vrev8_v $V12, $V12]} ++ addi $INP, $INP, 32 ++ @{[vle64_v $V14, $INP]} ++ @{[vrev8_v $V14, $V14]} ++ addi $INP, $INP, 32 ++ @{[vle64_v $V16, $INP]} ++ @{[vrev8_v $V16, $V16]} ++ addi $INP, $INP, 32 ++ ++ .rept 4 ++ # Quad-round 0 (+0, v10->v12->v14->v16) ++ @{[vle64_v $V20, ($KT)]} ++ addi $KT, $KT, 32 ++ @{[vadd_vv $V18, $V20, $V10]} ++ @{[vsha2cl_vv $V24, $V22, $V18]} ++ @{[vsha2ch_vv $V22, $V24, $V18]} ++ @{[vmerge_vvm $V18, $V14, $V12, $V0]} ++ @{[vsha2ms_vv $V10, $V18, $V16]} ++ ++ # Quad-round 1 (+1, v12->v14->v16->v10) ++ @{[vle64_v $V20, ($KT)]} ++ addi $KT, $KT, 32 ++ @{[vadd_vv $V18, $V20, $V12]} ++ @{[vsha2cl_vv $V24, $V22, $V18]} ++ @{[vsha2ch_vv $V22, $V24, $V18]} ++ @{[vmerge_vvm $V18, $V16, $V14, $V0]} ++ @{[vsha2ms_vv $V12, $V18, $V10]} ++ ++ # Quad-round 2 (+2, v14->v16->v10->v12) ++ @{[vle64_v $V20, ($KT)]} ++ addi $KT, $KT, 32 ++ @{[vadd_vv $V18, $V20, $V14]} ++ @{[vsha2cl_vv $V24, $V22, $V18]} ++ @{[vsha2ch_vv $V22, $V24, $V18]} ++ @{[vmerge_vvm $V18, $V10, $V16, $V0]} ++ @{[vsha2ms_vv $V14, $V18, $V12]} ++ ++ # Quad-round 3 (+3, v16->v10->v12->v14) ++ @{[vle64_v $V20, ($KT)]} ++ addi $KT, $KT, 32 ++ @{[vadd_vv $V18, $V20, $V16]} ++ @{[vsha2cl_vv $V24, $V22, $V18]} ++ @{[vsha2ch_vv $V22, $V24, $V18]} ++ @{[vmerge_vvm $V18, $V12, $V10, $V0]} ++ @{[vsha2ms_vv $V16, $V18, $V14]} ++ .endr ++ ++ # Quad-round 16 (+0, v10->v12->v14->v16) ++ # Note that we stop generating new message schedule words (Wt, v10-16) ++ # as we already generated all the words we end up consuming (i.e., W[79:76]). ++ @{[vle64_v $V20, ($KT)]} ++ addi $KT, $KT, 32 ++ @{[vadd_vv $V18, $V20, $V10]} ++ @{[vsha2cl_vv $V24, $V22, $V18]} ++ @{[vsha2ch_vv $V22, $V24, $V18]} ++ ++ # Quad-round 17 (+1, v12->v14->v16->v10) ++ @{[vle64_v $V20, ($KT)]} ++ addi $KT, $KT, 32 ++ @{[vadd_vv $V18, $V20, $V12]} ++ @{[vsha2cl_vv $V24, $V22, $V18]} ++ @{[vsha2ch_vv $V22, $V24, $V18]} ++ ++ # Quad-round 18 (+2, v14->v16->v10->v12) ++ @{[vle64_v $V20, ($KT)]} ++ addi $KT, $KT, 32 ++ @{[vadd_vv $V18, $V20, $V14]} ++ @{[vsha2cl_vv $V24, $V22, $V18]} ++ @{[vsha2ch_vv $V22, $V24, $V18]} ++ ++ # Quad-round 19 (+3, v16->v10->v12->v14) ++ @{[vle64_v $V20, ($KT)]} ++ # No t1 increment needed. ++ @{[vadd_vv $V18, $V20, $V16]} ++ @{[vsha2cl_vv $V24, $V22, $V18]} ++ @{[vsha2ch_vv $V22, $V24, $V18]} ++ ++ # H' = H+{a',b',c',...,h'} ++ @{[vadd_vv $V22, $V26, $V22]} ++ @{[vadd_vv $V24, $V28, $V24]} ++ bnez $LEN, L_round_loop ++ ++ # Store {f,e,b,a},{h,g,d,c} back to {a,b,c,d},{e,f,g,h}. 
++ @{[vsuxei8_v $V22, ($H), $V1]} ++ @{[vsuxei8_v $V24, ($H2), $V1]} ++ ++ ret ++.size sha512_block_data_order_zvkb_zvknhb,.-sha512_block_data_order_zvkb_zvknhb ++ ++.p2align 3 ++.type $K512,\@object ++$K512: ++ .dword 0x428a2f98d728ae22, 0x7137449123ef65cd ++ .dword 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc ++ .dword 0x3956c25bf348b538, 0x59f111f1b605d019 ++ .dword 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 ++ .dword 0xd807aa98a3030242, 0x12835b0145706fbe ++ .dword 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 ++ .dword 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 ++ .dword 0x9bdc06a725c71235, 0xc19bf174cf692694 ++ .dword 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 ++ .dword 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 ++ .dword 0x2de92c6f592b0275, 0x4a7484aa6ea6e483 ++ .dword 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 ++ .dword 0x983e5152ee66dfab, 0xa831c66d2db43210 ++ .dword 0xb00327c898fb213f, 0xbf597fc7beef0ee4 ++ .dword 0xc6e00bf33da88fc2, 0xd5a79147930aa725 ++ .dword 0x06ca6351e003826f, 0x142929670a0e6e70 ++ .dword 0x27b70a8546d22ffc, 0x2e1b21385c26c926 ++ .dword 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df ++ .dword 0x650a73548baf63de, 0x766a0abb3c77b2a8 ++ .dword 0x81c2c92e47edaee6, 0x92722c851482353b ++ .dword 0xa2bfe8a14cf10364, 0xa81a664bbc423001 ++ .dword 0xc24b8b70d0f89791, 0xc76c51a30654be30 ++ .dword 0xd192e819d6ef5218, 0xd69906245565a910 ++ .dword 0xf40e35855771202a, 0x106aa07032bbd1b8 ++ .dword 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 ++ .dword 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 ++ .dword 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb ++ .dword 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 ++ .dword 0x748f82ee5defb2fc, 0x78a5636f43172f60 ++ .dword 0x84c87814a1f0ab72, 0x8cc702081a6439ec ++ .dword 0x90befffa23631e28, 0xa4506cebde82bde9 ++ .dword 0xbef9a3f7b2c67915, 0xc67178f2e372532b ++ .dword 0xca273eceea26619c, 0xd186b8c721c0c207 ++ .dword 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 ++ .dword 0x06f067aa72176fba, 0x0a637dc5a2c898a6 ++ .dword 0x113f9804bef90dae, 0x1b710b35131c471b ++ .dword 0x28db77f523047d84, 0x32caab7b40c72493 ++ .dword 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c ++ .dword 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a ++ .dword 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 ++.size $K512,.-$K512 ++___ ++ ++print $code; ++ ++close STDOUT or die "error closing STDOUT: $!"; +diff --git a/crypto/sha/build.info b/crypto/sha/build.info +index 9b368d8f02..4dd093cea2 100644 +--- a/crypto/sha/build.info ++++ b/crypto/sha/build.info +@@ -49,6 +49,9 @@ IF[{- !$disabled{asm} -}] + $SHA1ASM_c64xplus=sha1-c64xplus.s sha256-c64xplus.s sha512-c64xplus.s + $SHA1DEF_c64xplus=SHA1_ASM SHA256_ASM SHA512_ASM + ++ $SHA1ASM_riscv64=sha_riscv.c sha256-riscv64.S sha256-riscv64-zbb.S sha256-riscv64-zvkb-zvknha_or_zvknhb.S sha512-riscv64-zbb.S sha512-riscv64-zvkb-zvknhb.S ++ $SHA1DEF_riscv64=SHA256_ASM INCLUDE_C_SHA256 SHA512_ASM INCLUDE_C_SHA512 ++ + # Now that we have defined all the arch specific variables, use the + # appropriate one, and define the appropriate macros + IF[$SHA1ASM_{- $target{asm_arch} -}] +@@ -135,6 +138,11 @@ GENERATE[sha1-parisc.s]=asm/sha1-parisc.pl + GENERATE[sha256-parisc.s]=asm/sha512-parisc.pl + GENERATE[sha512-parisc.s]=asm/sha512-parisc.pl + ++GENERATE[sha256-loongarch64.S]=asm/sha256-loongarch64.pl ++INCLUDE[sha256-loongarch64.o]=.. ++GENERATE[sha512-loongarch64.S]=asm/sha512-loongarch64.pl ++INCLUDE[sha512-loongarch64.o]=.. ++ + GENERATE[sha1-mips.S]=asm/sha1-mips.pl + INCLUDE[sha1-mips.o]=.. 
+ GENERATE[sha256-mips.S]=asm/sha512-mips.pl +@@ -171,6 +179,13 @@ GENERATE[keccak1600-s390x.S]=asm/keccak1600-s390x.pl + GENERATE[sha1-c64xplus.S]=asm/sha1-c64xplus.pl + GENERATE[sha256-c64xplus.S]=asm/sha256-c64xplus.pl + GENERATE[sha512-c64xplus.S]=asm/sha512-c64xplus.pl ++ ++GENERATE[sha256-riscv64.S]=asm/sha256-riscv64-zbb.pl ++GENERATE[sha256-riscv64-zbb.S]=asm/sha256-riscv64-zbb.pl zbb ++GENERATE[sha256-riscv64-zvkb-zvknha_or_zvknhb.S]=asm/sha256-riscv64-zvkb-zvknha_or_zvknhb.pl ++GENERATE[sha512-riscv64-zbb.S]=asm/sha512-riscv64-zbb.pl ++GENERATE[sha512-riscv64-zvkb-zvknhb.S]=asm/sha512-riscv64-zvkb-zvknhb.pl ++ + GENERATE[keccak1600-c64x.S]=asm/keccak1600-c64x.pl + + # These are not yet used +diff --git a/crypto/sha/sha256.c b/crypto/sha/sha256.c +index 5845c38937..7ef6023b0a 100644 +--- a/crypto/sha/sha256.c ++++ b/crypto/sha/sha256.c +@@ -104,12 +104,16 @@ int SHA224_Final(unsigned char *md, SHA256_CTX *c) + #define HASH_BLOCK_DATA_ORDER sha256_block_data_order + #ifndef SHA256_ASM + static +-#endif ++#else ++# ifdef INCLUDE_C_SHA256 ++void sha256_block_data_order_c(SHA256_CTX *ctx, const void *in, size_t num); ++# endif /* INCLUDE_C_SHA256 */ ++#endif /* SHA256_ASM */ + void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num); + + #include "crypto/md32_common.h" + +-#ifndef SHA256_ASM ++#if !defined(SHA256_ASM) || defined(INCLUDE_C_SHA256) + static const SHA_LONG K256[64] = { + 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, + 0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, +@@ -222,8 +226,12 @@ static void sha256_block_data_order(SHA256_CTX *ctx, const void *in, + T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f]; \ + ROUND_00_15(i,a,b,c,d,e,f,g,h); } while (0) + ++#ifdef INCLUDE_C_SHA256 ++void sha256_block_data_order_c(SHA256_CTX *ctx, const void *in, size_t num) ++#else + static void sha256_block_data_order(SHA256_CTX *ctx, const void *in, + size_t num) ++#endif + { + unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1; + SHA_LONG X[16]; +diff --git a/crypto/sha/sha512.c b/crypto/sha/sha512.c +index ff035c469f..ec236c8635 100644 +--- a/crypto/sha/sha512.c ++++ b/crypto/sha/sha512.c +@@ -149,6 +149,10 @@ int SHA512_Init(SHA512_CTX *c) + + #ifndef SHA512_ASM + static ++#else ++# ifdef INCLUDE_C_SHA512 ++void sha512_block_data_order_c(SHA512_CTX *ctx, const void *in, size_t num); ++# endif + #endif + void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num); + +@@ -338,7 +342,7 @@ void SHA512_Transform(SHA512_CTX *c, const unsigned char *data) + sha512_block_data_order(c, data, 1); + } + +-#ifndef SHA512_ASM ++#if !defined(SHA512_ASM) || defined(INCLUDE_C_SHA512) + static const SHA_LONG64 K512[80] = { + U64(0x428a2f98d728ae22), U64(0x7137449123ef65cd), + U64(0xb5c0fbcfec4d3b2f), U64(0xe9b5dba58189dbbc), +@@ -628,8 +632,12 @@ static void sha512_block_data_order(SHA512_CTX *ctx, const void *in, + T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f]; \ + ROUND_00_15(i+j,a,b,c,d,e,f,g,h); } while (0) + ++#ifdef INCLUDE_C_SHA512 ++void sha512_block_data_order_c(SHA512_CTX *ctx, const void *in, size_t num) ++#else + static void sha512_block_data_order(SHA512_CTX *ctx, const void *in, + size_t num) ++#endif + { + const SHA_LONG64 *W = in; + SHA_LONG64 a, b, c, d, e, f, g, h, s0, s1, T1; +diff --git a/crypto/sha/sha_riscv.c b/crypto/sha/sha_riscv.c +new file mode 100644 +index 0000000000..9cf4d22976 +--- /dev/null ++++ b/crypto/sha/sha_riscv.c +@@ -0,0 +1,49 @@ ++/* ++ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved. 
++ *
++ * Licensed under the Apache License 2.0 (the "License"). You may not use
++ * this file except in compliance with the License. You can obtain a copy
++ * in the file LICENSE in the source distribution or at
++ * https://www.openssl.org/source/license.html
++ */
++
++#include <stdlib.h>
++#include <string.h>
++
++#include <openssl/opensslconf.h>
++#include <openssl/sha.h>
++#include "crypto/riscv_arch.h"
++
++void sha256_block_data_order_zvkb_zvknha_or_zvknhb(void *ctx, const void *in,
++                                                   size_t num);
++void sha256_block_data_order_zbb(void *ctx, const void *in, size_t num);
++void sha256_block_data_order_riscv64(void *ctx, const void *in, size_t num);
++void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num);
++
++void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num)
++{
++    if (RISCV_HAS_ZVKB() && (RISCV_HAS_ZVKNHA() || RISCV_HAS_ZVKNHB()) &&
++        riscv_vlen() >= 128) {
++        sha256_block_data_order_zvkb_zvknha_or_zvknhb(ctx, in, num);
++    } else if (RISCV_HAS_ZBB()) {
++        sha256_block_data_order_zbb(ctx, in, num);
++    } else {
++        sha256_block_data_order_riscv64(ctx, in, num);
++    }
++}
++
++void sha512_block_data_order_zvkb_zvknhb(void *ctx, const void *in, size_t num);
++void sha512_block_data_order_zbb(void *ctx, const void *in, size_t num);
++void sha512_block_data_order_c(void *ctx, const void *in, size_t num);
++void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num);
++
++void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num)
++{
++    if (RISCV_HAS_ZVKB_AND_ZVKNHB() && riscv_vlen() >= 128) {
++        sha512_block_data_order_zvkb_zvknhb(ctx, in, num);
++    } else if (RISCV_HAS_ZBB()) {
++        sha512_block_data_order_zbb(ctx, in, num);
++    } else {
++        sha512_block_data_order_c(ctx, in, num);
++    }
++}
+-- 
+2.51.1
+
diff --git a/openssl.spec b/openssl.spec
index 9867e00..78dac0f 100755
--- a/openssl.spec
+++ b/openssl.spec
@@ -2,7 +2,7 @@
 Name: openssl
 Epoch: 1
 Version: 3.0.12
-Release: 28
+Release: 29
 Summary: Cryptography and SSL/TLS Toolkit
 License: Apache-2.0
 URL: https://www.openssl.org/
@@ -85,6 +85,7 @@ Patch6001: Backport-riscv-Further-optimization-for-AES-128-CBC-decryption-perf
 Patch6002: Backport-Optimize-SM2-foundational-framework-modifications.patch
 Patch6003: Backport-Add-SM2-implementation-in-generic-riscv64-asm.patch
 Patch6004: Backport-Implement-Montgomery-multiplication-assembly-optimization-for-RV64GC.patch
+Patch6005: Backport-RISC-V-SHA2-assembly-optimization.patch
 Patch9000: add-FIPS_mode_set-support.patch
 Patch9001: backport-CVE-2024-9143-Harden-BN_GF2m_poly2arr-against-misuse.patch
 
@@ -299,6 +300,9 @@ make test || :
 %ldconfig_scriptlets libs
 
 %changelog
+* Mon Oct 27 2025 Julian Zhu - 1:3.0.12-29
+- Backport SHA2 assembly optimization for RISC-V
+
 * Wed Oct 1 2025 lizhipeng - 1:3.0.12-28
 - fix CVE-2025-9230
 
-- 
Gitee