blake2b: add AVX assembly

Add an AVX implementation and improve the SSE4.1 assembly.

AVX vs SSE4.1
name        old time/op    new time/op    delta
Write128-8   249ns ± 0%     220ns ± 0%    -11.85%  (p=0.029 n=4+4)
Write1K-8   1.68µs ± 1%    1.56µs ± 1%     -6.71%  (p=0.029 n=4+4)
Write32K-8  52.6µs ± 0%    48.7µs ± 0%     -7.40%  (p=0.029 n=4+4)
Sum128-8     264ns ± 0%     241ns ± 1%     -8.52%  (p=0.029 n=4+4)
Sum1K-8     1.70µs ± 0%    1.57µs ± 0%     -7.79%  (p=0.029 n=4+4)
Sum32K-8    54.1µs ± 3%    49.5µs ± 1%     -8.36%  (p=0.029 n=4+4)

name        old speed      new speed      delta
Write128-8  513MB/s ± 0%   582MB/s ± 0%   +13.38%  (p=0.029 n=4+4)
Write1K-8   610MB/s ± 1%   654MB/s ± 1%    +7.22%  (p=0.029 n=4+4)
Write32K-8  622MB/s ± 0%   672MB/s ± 0%    +7.99%  (p=0.029 n=4+4)
Sum128-8    484MB/s ± 1%   529MB/s ± 0%    +9.21%  (p=0.029 n=4+4)
Sum1K-8     602MB/s ± 0%   653MB/s ± 0%    +8.42%  (p=0.029 n=4+4)
Sum32K-8    607MB/s ± 3%   662MB/s ± 1%    +9.03%  (p=0.029 n=4+4)

AVX2 vs AVX
name        old time/op    new time/op    delta
Write128-4   192ns ± 0%     166ns ± 0%    -14.03%  (p=0.029 n=4+4)
Write1K-4   1.37µs ± 0%    1.19µs ± 0%    -12.65%  (p=0.029 n=4+4)
Write32K-4  42.5µs ± 0%    37.3µs ± 0%    -12.33%  (p=0.029 n=4+4)
Sum128-4     213ns ± 0%     188ns ± 0%    -11.97%  (p=0.029 n=4+4)
Sum1K-4     1.40µs ± 0%    1.22µs ± 0%    -12.85%  (p=0.029 n=4+4)
Sum32K-4    42.8µs ± 0%    37.3µs ± 0%    -12.94%  (p=0.029 n=4+4)

name        old speed      new speed      delta
Write128-4  662MB/s ± 0%   771MB/s ± 0%   +16.47%  (p=0.029 n=4+4)
Write1K-4   748MB/s ± 0%   857MB/s ± 0%   +14.49%  (p=0.029 n=4+4)
Write32K-4  771MB/s ± 0%   879MB/s ± 0%   +14.07%  (p=0.029 n=4+4)
Sum128-4    600MB/s ± 0%   680MB/s ± 0%   +13.49%  (p=0.029 n=4+4)
Sum1K-4     733MB/s ± 0%   841MB/s ± 0%   +14.72%  (p=0.029 n=4+4)
Sum32K-4    765MB/s ± 0%   879MB/s ± 0%   +14.85%  (p=0.029 n=4+4)
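
For reference, the block-hashing dispatch after this change prefers AVX2,
then AVX, then SSE4.1, and finally the portable Go code. A condensed sketch
of blake2bAVX2_amd64.go after the patch (the //go:noescape assembly
declarations are elided, and the name of the generic fallback is assumed
from the existing package code):

	package blake2b

	var useAVX2 = supportAVX2()
	var useAVX = supportAVX()
	var useSSE4 = supportSSE4()

	// hashBlocks picks the fastest implementation supported by the CPU:
	// AVX2, then AVX, then SSE4.1, then the portable Go fallback.
	func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
		if useAVX2 {
			hashBlocksAVX2(h, c, flag, blocks)
		} else if useAVX {
			hashBlocksAVX(h, c, flag, blocks)
		} else if useSSE4 {
			hashBlocksSSE4(h, c, flag, blocks)
		} else {
			hashBlocksGeneric(h, c, flag, blocks) // assumed generic fallback name
		}
	}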

Change-Id: Idf85742e952c07b76c0c7fb5404ed9b0caf0f6eb
Reviewed-on: https://go-review.googlesource.com/34319
Reviewed-by: Adam Langley <agl@golang.org>
diff --git a/blake2b/blake2bAVX2_amd64.go b/blake2b/blake2bAVX2_amd64.go
index 756bffc..422ba84 100644
--- a/blake2b/blake2bAVX2_amd64.go
+++ b/blake2b/blake2bAVX2_amd64.go
@@ -7,23 +7,32 @@
 package blake2b
 
 var useAVX2 = supportAVX2()
+var useAVX = supportAVX()
 var useSSE4 = supportSSE4()
 
 //go:noescape
 func supportSSE4() bool
 
 //go:noescape
+func supportAVX() bool
+
+//go:noescape
 func supportAVX2() bool
 
 //go:noescape
 func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
 
 //go:noescape
+func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+
+//go:noescape
 func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
 
 func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
 	if useAVX2 {
 		hashBlocksAVX2(h, c, flag, blocks)
+	} else if useAVX {
+		hashBlocksAVX(h, c, flag, blocks)
 	} else if useSSE4 {
 		hashBlocksSSE4(h, c, flag, blocks)
 	} else {
diff --git a/blake2b/blake2bAVX2_amd64.s b/blake2b/blake2bAVX2_amd64.s
index 1703fe4..86bc182 100644
--- a/blake2b/blake2bAVX2_amd64.s
+++ b/blake2b/blake2bAVX2_amd64.s
@@ -6,32 +6,56 @@
 
 #include "textflag.h"
 
+DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
+DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
+
+DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
+DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
+DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
+
+DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
+DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
+
+DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
+DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
+
 DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
 DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-DATA ·AVX_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $32
+GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
 
-DATA ·AVX_iv1<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-DATA ·AVX_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $32
+DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
+
+DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
+DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
+
+DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
 
 DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
 DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-DATA ·AVX_c40<>+0x10(SB)/8, $0x0201000706050403
-DATA ·AVX_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $32
+GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
 
 DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
 DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-DATA ·AVX_c48<>+0x10(SB)/8, $0x0100070605040302
-DATA ·AVX_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $32
+GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
 
 // unfortunately the BYTE representation of VPERMQ must be used
-#define ROUND(m0, m1, m2, m3, t, c40, c48) \
+#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
 	VPADDQ  m0, Y0, Y0;                                                       \
 	VPADDQ  Y1, Y0, Y0;                                                       \
 	VPXOR   Y0, Y3, Y3;                                                       \
@@ -72,7 +96,7 @@
 	BYTE    $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 \ // VPERMQ 0x93, Y1, Y1
 
 // load msg into Y12, Y13, Y14, Y15
-#define LOAD_MSG(src, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) \
+#define LOAD_MSG_AVX2(src, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) \
 	MOVQ        i0*8(src), X12;      \
 	PINSRQ      $1, i1*8(src), X12;  \
 	MOVQ        i2*8(src), X11;      \
@@ -112,13 +136,13 @@
 	XORQ CX, CX
 	MOVQ CX, 24(SP)
 
-	VMOVDQU ·AVX_c40<>(SB), Y4
-	VMOVDQU ·AVX_c48<>(SB), Y5
+	VMOVDQU ·AVX2_c40<>(SB), Y4
+	VMOVDQU ·AVX2_c48<>(SB), Y5
 
 	VMOVDQU 0(AX), Y8
 	VMOVDQU 32(AX), Y9
-	VMOVDQU ·AVX_iv0<>(SB), Y6
-	VMOVDQU ·AVX_iv1<>(SB), Y7
+	VMOVDQU ·AVX2_iv0<>(SB), Y6
+	VMOVDQU ·AVX2_iv1<>(SB), Y7
 
 	MOVQ 0(BX), R8
 	MOVQ 8(BX), R9
@@ -135,41 +159,41 @@
 noinc:
 	VMOVDQA Y8, Y0
 	VMOVDQA Y9, Y1
-	VMOVDQU Y6, Y2
+	VMOVDQA Y6, Y2
 	VPXOR   0(SP), Y7, Y3
 
-	LOAD_MSG(SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15)
+	LOAD_MSG_AVX2(SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15)
 	VMOVDQA Y12, 32(SP)
 	VMOVDQA Y13, 64(SP)
 	VMOVDQA Y14, 96(SP)
 	VMOVDQA Y15, 128(SP)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3)
 	VMOVDQA Y12, 160(SP)
 	VMOVDQA Y13, 192(SP)
 	VMOVDQA Y14, 224(SP)
 	VMOVDQA Y15, 256(SP)
 
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG(SI, 10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0)
-	ROUND(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
+	LOAD_MSG_AVX2(SI, 10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0)
+	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
 
-	ROUND(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
-	ROUND(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)
+	ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
+	ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)
 
 	VPXOR Y0, Y8, Y8
 	VPXOR Y1, Y9, Y9
@@ -189,8 +213,290 @@
 	MOVQ DX, SP
 	RET
 
+// unfortunately the BYTE representation of VPUNPCKLQDQ and VPUNPCKHQDQ must be used
+#define VPUNPCKLQDQ_X8_X8_X10 BYTE $0xC4; BYTE $0x41; BYTE $0x39; BYTE $0x6C; BYTE $0xD0
+#define VPUNPCKHQDQ_X7_X10_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF2
+#define VPUNPCKLQDQ_X7_X7_X10 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xD7
+#define VPUNPCKHQDQ_X8_X10_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x39; BYTE $0x6D; BYTE $0xFA
+#define VPUNPCKLQDQ_X3_X3_X10 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xD3
+#define VPUNPCKHQDQ_X2_X10_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD2
+#define VPUNPCKLQDQ_X9_X9_X10 BYTE $0xC4; BYTE $0x41; BYTE $0x31; BYTE $0x6C; BYTE $0xD1
+#define VPUNPCKHQDQ_X3_X10_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDA
+#define VPUNPCKLQDQ_X2_X2_X10 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xD2
+#define VPUNPCKHQDQ_X3_X10_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD2
+#define VPUNPCKHQDQ_X8_X10_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x39; BYTE $0x6D; BYTE $0xDA
+#define VPUNPCKHQDQ_X6_X10_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF2
+#define VPUNPCKHQDQ_X7_X10_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFA
+
+// shuffle X2 and X6 using the temp registers X8, X9, X10
+#define SHUFFLE_AVX() \
+	VMOVDQA X4, X9;        \
+	VMOVDQA X5, X4;        \
+	VMOVDQA X9, X5;        \
+	VMOVDQA X6, X8;        \
+	VPUNPCKLQDQ_X8_X8_X10; \
+	VPUNPCKHQDQ_X7_X10_X6; \
+	VPUNPCKLQDQ_X7_X7_X10; \
+	VPUNPCKHQDQ_X8_X10_X7; \
+	VPUNPCKLQDQ_X3_X3_X10; \
+	VMOVDQA X2, X9;        \
+	VPUNPCKHQDQ_X2_X10_X2; \
+	VPUNPCKLQDQ_X9_X9_X10; \
+	VPUNPCKHQDQ_X3_X10_X3; \
+
+// inverse shuffle X2 and X6 using the temp registers X8, X9, X10
+#define SHUFFLE_AVX_INV() \
+	VMOVDQA X4, X9;        \
+	VMOVDQA X5, X4;        \
+	VMOVDQA X9, X5;        \
+	VMOVDQA X2, X8;        \
+	VPUNPCKLQDQ_X2_X2_X10; \
+	VPUNPCKHQDQ_X3_X10_X2; \
+	VPUNPCKLQDQ_X3_X3_X10; \
+	VPUNPCKHQDQ_X8_X10_X3; \
+	VPUNPCKLQDQ_X7_X7_X10; \
+	VMOVDQA X6, X9;        \
+	VPUNPCKHQDQ_X6_X10_X6; \
+	VPUNPCKLQDQ_X9_X9_X10; \
+	VPUNPCKHQDQ_X7_X10_X7; \
+
+#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
+	VPADDQ  m0, v0, v0;   \
+	VPADDQ  v2, v0, v0;   \
+	VPADDQ  m1, v1, v1;   \
+	VPADDQ  v3, v1, v1;   \
+	VPXOR   v0, v6, v6;   \
+	VPXOR   v1, v7, v7;   \
+	VPSHUFD $-79, v6, v6; \
+	VPSHUFD $-79, v7, v7; \
+	VPADDQ  v6, v4, v4;   \
+	VPADDQ  v7, v5, v5;   \
+	VPXOR   v4, v2, v2;   \
+	VPXOR   v5, v3, v3;   \
+	VPSHUFB c40, v2, v2;  \
+	VPSHUFB c40, v3, v3;  \
+	VPADDQ  m2, v0, v0;   \
+	VPADDQ  v2, v0, v0;   \
+	VPADDQ  m3, v1, v1;   \
+	VPADDQ  v3, v1, v1;   \
+	VPXOR   v0, v6, v6;   \
+	VPXOR   v1, v7, v7;   \
+	VPSHUFB c48, v6, v6;  \
+	VPSHUFB c48, v7, v7;  \
+	VPADDQ  v6, v4, v4;   \
+	VPADDQ  v7, v5, v5;   \
+	VPXOR   v4, v2, v2;   \
+	VPXOR   v5, v3, v3;   \
+	VPADDQ  v2, v2, t0;   \
+	VPSRLQ  $63, v2, v2;  \
+	VPXOR   t0, v2, v2;   \
+	VPADDQ  v3, v3, t0;   \
+	VPSRLQ  $63, v3, v3;  \
+	VPXOR   t0, v3, v3
+
+// unfortunately the BYTE representation of VPINSRQ must be used
+#define VPINSRQ_1_R10_X8_X8 BYTE $0xC4; BYTE $0x43; BYTE $0xB9; BYTE $0x22; BYTE $0xC2; BYTE $0x01
+#define VPINSRQ_1_R11_X9_X9 BYTE $0xC4; BYTE $0x43; BYTE $0xB1; BYTE $0x22; BYTE $0xCB; BYTE $0x01
+#define VPINSRQ_1_R12_X10_X10 BYTE $0xC4; BYTE $0x43; BYTE $0xA9; BYTE $0x22; BYTE $0xD4; BYTE $0x01
+#define VPINSRQ_1_R13_X11_X11 BYTE $0xC4; BYTE $0x43; BYTE $0xA1; BYTE $0x22; BYTE $0xDD; BYTE $0x01
+
+#define VPINSRQ_1_R9_X8_X8 BYTE $0xC4; BYTE $0x43; BYTE $0xB9; BYTE $0x22; BYTE $0xC1; BYTE $0x01
+
+// load src into X8, X9, X10 and X11 using R10, R11, R12 and R13 for temp registers
+#define LOAD_MSG_AVX(src, i0, i1, i2, i3, i4, i5, i6, i7) \
+	MOVQ i0*8(src), X8;    \
+	MOVQ i1*8(src), R10;   \
+	MOVQ i2*8(src), X9;    \
+	MOVQ i3*8(src), R11;   \
+	MOVQ i4*8(src), X10;   \
+	MOVQ i5*8(src), R12;   \
+	MOVQ i6*8(src), X11;   \
+	MOVQ i7*8(src), R13;   \
+	VPINSRQ_1_R10_X8_X8;   \
+	VPINSRQ_1_R11_X9_X9;   \
+	VPINSRQ_1_R12_X10_X10; \
+	VPINSRQ_1_R13_X11_X11
+
+// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
+TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
+	MOVQ h+0(FP), AX
+	MOVQ c+8(FP), BX
+	MOVQ flag+16(FP), CX
+	MOVQ blocks_base+24(FP), SI
+	MOVQ blocks_len+32(FP), DI
+
+	MOVQ SP, BP
+	MOVQ SP, R9
+	ADDQ $15, R9
+	ANDQ $~15, R9
+	MOVQ R9, SP
+
+	MOVOU ·AVX_c40<>(SB), X13
+	MOVOU ·AVX_c48<>(SB), X14
+
+	VMOVDQU ·AVX_iv3<>(SB), X0
+	VMOVDQA X0, 0(SP)
+	XORQ    CX, 0(SP)          // 0(SP) = ·AVX_iv3 ^ (CX || 0)
+
+	VMOVDQU 0(AX), X12
+	VMOVDQU 16(AX), X15
+	VMOVDQU 32(AX), X2
+	VMOVDQU 48(AX), X3
+
+	MOVQ 0(BX), R8
+	MOVQ 8(BX), R9
+
+loop:
+	ADDQ $128, R8
+	CMPQ R8, $128
+	JGE  noinc
+	INCQ R9
+
+noinc:
+	MOVQ   R8, X8
+	VPINSRQ_1_R9_X8_X8
+
+	VMOVDQA X12, X0
+	VMOVDQA X15, X1
+	VMOVDQU ·AVX_iv0<>(SB), X4
+	VMOVDQU ·AVX_iv1<>(SB), X5
+	VMOVDQU ·AVX_iv2<>(SB), X6
+
+	VPXOR   X8, X6, X6
+	VMOVDQA 0(SP), X7
+
+	LOAD_MSG_AVX(SI, 0, 2, 4, 6, 1, 3, 5, 7)
+	VMOVDQA X8, 16(SP)
+	VMOVDQA X9, 32(SP)
+	VMOVDQA X10, 48(SP)
+	VMOVDQA X11, 64(SP)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 8, 10, 12, 14, 9, 11, 13, 15)
+	VMOVDQA X8, 80(SP)
+	VMOVDQA X9, 96(SP)
+	VMOVDQA X10, 112(SP)
+	VMOVDQA X11, 128(SP)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 14, 4, 9, 13, 10, 8, 15, 6)
+	VMOVDQA X8, 144(SP)
+	VMOVDQA X9, 160(SP)
+	VMOVDQA X10, 176(SP)
+	VMOVDQA X11, 192(SP)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 1, 0, 11, 5, 12, 2, 7, 3)
+	VMOVDQA X8, 208(SP)
+	VMOVDQA X9, 224(SP)
+	VMOVDQA X10, 240(SP)
+	VMOVDQA X11, 256(SP)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 11, 12, 5, 15, 8, 0, 2, 13)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 10, 3, 7, 9, 14, 6, 1, 4)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 7, 3, 13, 11, 9, 1, 12, 14)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 2, 5, 4, 15, 6, 10, 0, 8)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 9, 5, 2, 10, 0, 7, 4, 15)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 14, 11, 6, 3, 1, 12, 8, 13)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 2, 6, 0, 8, 12, 10, 11, 3)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 4, 7, 15, 1, 13, 5, 14, 9)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 12, 1, 14, 4, 5, 15, 13, 10)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 0, 6, 9, 8, 7, 3, 2, 11)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 13, 7, 12, 3, 11, 14, 1, 9)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 5, 15, 8, 2, 0, 4, 6, 10)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 6, 14, 11, 0, 15, 9, 3, 8)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 12, 13, 1, 10, 2, 7, 4, 5)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	LOAD_MSG_AVX(SI, 10, 8, 7, 1, 2, 4, 6, 5)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX()
+	LOAD_MSG_AVX(SI, 15, 9, 3, 13, 11, 14, 12, 0)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14)
+	SHUFFLE_AVX()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14)
+	SHUFFLE_AVX()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14)
+	SHUFFLE_AVX_INV()
+
+	VMOVDQU 32(AX), X10
+	VMOVDQU 48(AX), X11
+	VPXOR   X0, X12, X12
+	VPXOR   X1, X15, X15
+	VPXOR   X2, X10, X10
+	VPXOR   X3, X11, X11
+	VPXOR   X4, X12, X12
+	VPXOR   X5, X15, X15
+	VPXOR   X6, X10, X2
+	VPXOR   X7, X11, X3
+	VMOVDQU X2, 32(AX)
+	VMOVDQU X3, 48(AX)
+
+	LEAQ 128(SI), SI
+	SUBQ $128, DI
+	JNE  loop
+
+	VMOVDQU X12, 0(AX)
+	VMOVDQU X15, 16(AX)
+
+	MOVQ R8, 0(BX)
+	MOVQ R9, 8(BX)
+
+	VZEROUPPER
+
+	MOVQ BP, SP
+	RET
+
 // func supportAVX2() bool
 TEXT ·supportAVX2(SB), 4, $0-1
 	MOVQ runtime·support_avx2(SB), AX
 	MOVB AX, ret+0(FP)
 	RET
+
+// func supportAVX() bool
+TEXT ·supportAVX(SB), 4, $0-1
+	MOVQ runtime·support_avx(SB), AX
+	MOVB AX, ret+0(FP)
+	RET
diff --git a/blake2b/blake2b_amd64.go b/blake2b/blake2b_amd64.go
index f11dcbc..c9a22fd 100644
--- a/blake2b/blake2b_amd64.go
+++ b/blake2b/blake2b_amd64.go
@@ -7,6 +7,7 @@
 package blake2b
 
 var useAVX2 = false
+var useAVX = false
 var useSSE4 = supportSSE4()
 
 //go:noescape
diff --git a/blake2b/blake2b_amd64.s b/blake2b/blake2b_amd64.s
index 95aec80..04e45be 100644
--- a/blake2b/blake2b_amd64.s
+++ b/blake2b/blake2b_amd64.s
@@ -30,16 +30,16 @@
 DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
 GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
 
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t0, t1, t2) \
-	MOVO       v4, t0; \
+#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
+	MOVO       v4, t1; \
 	MOVO       v5, v4; \
-	MOVO       t0, v5; \
-	MOVO       v6, t0; \
+	MOVO       t1, v5; \
+	MOVO       v6, t1; \
 	PUNPCKLQDQ v6, t2; \
 	PUNPCKHQDQ v7, v6; \
 	PUNPCKHQDQ t2, v6; \
 	PUNPCKLQDQ v7, t2; \
-	MOVO       t0, v7; \
+	MOVO       t1, v7; \
 	MOVO       v2, t1; \
 	PUNPCKHQDQ t2, v7; \
 	PUNPCKLQDQ v3, t2; \
@@ -47,16 +47,16 @@
 	PUNPCKLQDQ t1, t2; \
 	PUNPCKHQDQ t2, v3
 
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t0, t1, t2) \
-	MOVO       v4, t0; \
+#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
+	MOVO       v4, t1; \
 	MOVO       v5, v4; \
-	MOVO       t0, v5; \
-	MOVO       v2, t0; \
+	MOVO       t1, v5; \
+	MOVO       v2, t1; \
 	PUNPCKLQDQ v2, t2; \
 	PUNPCKHQDQ v3, v2; \
 	PUNPCKHQDQ t2, v2; \
 	PUNPCKLQDQ v3, t2; \
-	MOVO       t0, v3; \
+	MOVO       t1, v3; \
 	MOVO       v6, t1; \
 	PUNPCKHQDQ t2, v3; \
 	PUNPCKLQDQ v7, t2; \
@@ -64,7 +64,7 @@
 	PUNPCKLQDQ t1, t2; \
 	PUNPCKHQDQ t2, v7
 
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, t1, t2, c40, c48) \
+#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
 	PADDQ  m0, v0;        \
 	PADDQ  m1, v1;        \
 	PADDQ  v2, v0;        \
@@ -91,14 +91,14 @@
 	PADDQ  v7, v5;        \
 	PXOR   v4, v2;        \
 	PXOR   v5, v3;        \
-	MOVOU  v2, t2;        \
-	PADDQ  v2, t2;        \
+	MOVOU  v2, t0;        \
+	PADDQ  v2, t0;        \
 	PSRLQ  $63, v2;       \
-	PXOR   t2, v2;        \
-	MOVOU  v3, t2;        \
-	PADDQ  v3, t2;        \
+	PXOR   t0, v2;        \
+	MOVOU  v3, t0;        \
+	PADDQ  v3, t0;        \
 	PSRLQ  $63, v3;       \
-	PXOR   t2, v3
+	PXOR   t0, v3
 
 #define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
 	MOVQ   i0*8(src), m0;     \
@@ -111,7 +111,7 @@
 	PINSRQ $1, i7*8(src), m3
 
 // func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksSSE4(SB), 4, $32-48 // frame size = 16 + 16 byte alignment
+TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
 	MOVQ h+0(FP), AX
 	MOVQ c+8(FP), BX
 	MOVQ flag+16(FP), CX
@@ -131,6 +131,9 @@
 	MOVOU ·c40<>(SB), X13
 	MOVOU ·c48<>(SB), X14
 
+	MOVOU 0(AX), X12
+	MOVOU 16(AX), X15
+
 	MOVQ 0(BX), R8
 	MOVQ 8(BX), R9
 
@@ -141,118 +144,126 @@
 	INCQ R9
 
 noinc:
-	MOVQ R8, X15
-	PINSRQ $1, R9, X15
+	MOVQ R8, X8
+	PINSRQ $1, R9, X8
 
-	MOVOU 0(AX), X0
-	MOVOU 16(AX), X1
+	MOVO X12, X0
+	MOVO X15, X1
 	MOVOU 32(AX), X2
 	MOVOU 48(AX), X3
 	MOVOU ·iv0<>(SB), X4
 	MOVOU ·iv1<>(SB), X5
 	MOVOU ·iv2<>(SB), X6
 
-	PXOR X15, X6
+	PXOR X8, X6
 	MOVO 0(SP), X7
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	MOVO X8, 16(SP)
+	MOVO X9, 32(SP)
+	MOVO X10, 48(SP)
+	MOVO X11, 64(SP)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	MOVO X8, 80(SP)
+	MOVO X9, 96(SP)
+	MOVO X10, 112(SP)
+	MOVO X11, 128(SP)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	MOVO X8, 144(SP)
+	MOVO X9, 160(SP)
+	MOVO X10, 176(SP)
+	MOVO X11, 192(SP)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	MOVO X8, 208(SP)
+	MOVO X9, 224(SP)
+	MOVO X10, 240(SP)
+	MOVO X11, 256(SP)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
 	LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
 	LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
-	LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
-	LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
-	LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9, X10)
-	LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
-	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X8, X9, X12, X13, X14)
-	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9, X10)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14)
+	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
+	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14)
+	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
 
-	MOVOU 0(AX), X8
-	MOVOU 16(AX), X9
 	MOVOU 32(AX), X10
 	MOVOU 48(AX), X11
-	PXOR  X0, X8
-	PXOR  X1, X9
+	PXOR  X0, X12
+	PXOR  X1, X15
 	PXOR  X2, X10
 	PXOR  X3, X11
-	PXOR  X4, X8
-	PXOR  X5, X9
+	PXOR  X4, X12
+	PXOR  X5, X15
 	PXOR  X6, X10
 	PXOR  X7, X11
-	MOVOU X8, 0(AX)
-	MOVOU X9, 16(AX)
 	MOVOU X10, 32(AX)
 	MOVOU X11, 48(AX)
 
@@ -260,7 +271,11 @@
 	SUBQ $128, DI
 	JNE  loop
 
-	MOVOU X15, 0(BX)
+	MOVOU X12, 0(AX)
+	MOVOU X15, 16(AX)
+
+	MOVQ R8, 0(BX)
+	MOVQ R9, 8(BX)
 
 	MOVQ BP, SP
 	RET
diff --git a/blake2b/blake2b_test.go b/blake2b/blake2b_test.go
index 4607fda..a38fceb 100644
--- a/blake2b/blake2b_test.go
+++ b/blake2b/blake2b_test.go
@@ -21,15 +21,20 @@
 }
 
 func TestHashes(t *testing.T) {
-	defer func(sse4, avx2 bool) {
-		useSSE4, useAVX2 = sse4, avx2
-	}(useSSE4, useAVX2)
+	defer func(sse4, avx, avx2 bool) {
+		useSSE4, useAVX, useAVX2 = sse4, avx, avx2
+	}(useSSE4, useAVX, useAVX2)
 
 	if useAVX2 {
 		t.Log("AVX2 version")
 		testHashes(t)
 		useAVX2 = false
 	}
+	if useAVX {
+		t.Log("AVX version")
+		testHashes(t)
+		useAVX = false
+	}
 	if useSSE4 {
 		t.Log("SSE4 version")
 		testHashes(t)