blake2b: fix AVX performance problems on amd64

On some amd64 CPUs (Xeon E5-2680v4 / E5-2620v3) mixing SSE and AVX instructions
leads to very low performance, most likely due to SSE/AVX state-transition
penalties. On an i7-6500U the same SSE/AVX code performs as follows:

AVX2:
name        time/op
Write128-4    165ns ± 0%
Write1K-4    1.20µs ± 0%
Sum128-4      189ns ± 1%
Sum1K-4      1.22µs ± 0%

name        speed
Write128-4  773MB/s ± 1%
Write1K-4   855MB/s ± 0%
Sum128-4    675MB/s ± 1%
Sum1K-4     838MB/s ± 0%

while the same code achieves less than 65 MB/s on a Xeon E5-2620v3.

Replacing the SSE instructions `MOVQ` and `PINSRQ` with the AVX instructions
`VMOVQ` and `VPINSRQ` brings the performance of the AVX/AVX2 code up to the
expected range:

name         old time/op    new time/op     delta
Write128-12    2.20µs ±10%     0.22µs ± 9%    -90.00%  (p=0.029 n=4+4)
Write1K-12     16.2µs ± 0%      1.1µs ± 0%    -93.07%  (p=0.029 n=4+4)
Sum128-12      2.10µs ± 0%     0.22µs ± 0%    -89.47%  (p=0.029 n=4+4)
Sum1K-12       16.3µs ± 0%      1.2µs ± 0%    -92.65%  (p=0.029 n=4+4)

name         old speed      new speed       delta
Write128-12  58.5MB/s ±10%  582.8MB/s ±10%   +897.08%  (p=0.029 n=4+4)
Write1K-12   63.1MB/s ± 0%  909.8MB/s ± 0%  +1341.40%  (p=0.029 n=4+4)
Sum128-12    60.8MB/s ± 0%  576.3MB/s ± 0%   +847.84%  (p=0.029 n=4+4)
Sum1K-12     62.8MB/s ± 0%  855.2MB/s ± 0%  +1260.78%  (p=0.029 n=4+4)
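
The change follows the same pattern throughout. As an illustrative sketch (not
a verbatim excerpt; the byte-encoded macros are defined in the diff below and
only accept non-zero word indices), a message-word load in the LOAD_MSG macros
changes from the SSE form

	MOVQ   i0*8(SI), X12
	PINSRQ $1, i1*8(SI), X12

to the equivalent AVX form

	VMOVQ_SI_X12(i0*8)     // VMOVQ   i0*8(SI), X12
	VPINSRQ_1_SI_X12(i1*8) // VPINSRQ $1, i1*8(SI), X12, X12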

The AVX/AVX2 code now uses only AVX (no SSE) instructions.
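
The VMOVQ/VPINSRQ forms with a memory operand are emitted as raw bytes,
apparently because the assembler did not accept these operand combinations
directly (see the "BYTE representation" comments removed below). As an
illustrative decoding, worked out by hand rather than taken from the source:

	// VMOVQ_SI_X12_0 = C5 7A 7E 26
	//   C5   two-byte VEX prefix
	//   7A   R extends ModRM.reg, vvvv unused, L=0 (128-bit), pp=F3
	//   7E   opcode: VMOVQ xmm, m64
	//   26   ModRM: mod=00, reg=100b (+8 -> X12), rm=110b -> (SI)
	// i.e. VMOVQ (SI), X12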

Fixes golang/go#18563.

Change-Id: I1961dd8fa02014642587523b7f099816a263c9f5
Reviewed-on: https://go-review.googlesource.com/34993
Reviewed-by: Adam Langley <agl@golang.org>
diff --git a/blake2b/blake2bAVX2_amd64.s b/blake2b/blake2bAVX2_amd64.s
index 96a51d5..784bce6 100644
--- a/blake2b/blake2bAVX2_amd64.s
+++ b/blake2b/blake2bAVX2_amd64.s
@@ -54,68 +54,223 @@
 DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
 GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
 
-// unfortunately the BYTE representation of VPERMQ must be used
-#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
-	VPADDQ  m0, Y0, Y0;                                                       \
-	VPADDQ  Y1, Y0, Y0;                                                       \
-	VPXOR   Y0, Y3, Y3;                                                       \
-	VPSHUFD $-79, Y3, Y3;                                                     \
-	VPADDQ  Y3, Y2, Y2;                                                       \
-	VPXOR   Y2, Y1, Y1;                                                       \
-	VPSHUFB c40, Y1, Y1;                                                      \
-	VPADDQ  m1, Y0, Y0;                                                       \
-	VPADDQ  Y1, Y0, Y0;                                                       \
-	VPXOR   Y0, Y3, Y3;                                                       \
-	VPSHUFB c48, Y3, Y3;                                                      \
-	VPADDQ  Y3, Y2, Y2;                                                       \
-	VPXOR   Y2, Y1, Y1;                                                       \
-	VPADDQ  Y1, Y1, t;                                                        \
-	VPSRLQ  $63, Y1, Y1;                                                      \
-	VPXOR   t, Y1, Y1;                                                        \
-	BYTE    $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 \ // VPERMQ 0x39, Y1, Y1
-	BYTE    $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e \ // VPERMQ 0x4e, Y2, Y2
-	BYTE    $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 \ // VPERMQ 0x93, Y3, Y3
-	VPADDQ  m2, Y0, Y0;                                                       \
-	VPADDQ  Y1, Y0, Y0;                                                       \
-	VPXOR   Y0, Y3, Y3;                                                       \
-	VPSHUFD $-79, Y3, Y3;                                                     \
-	VPADDQ  Y3, Y2, Y2;                                                       \
-	VPXOR   Y2, Y1, Y1;                                                       \
-	VPSHUFB c40, Y1, Y1;                                                      \
-	VPADDQ  m3, Y0, Y0;                                                       \
-	VPADDQ  Y1, Y0, Y0;                                                       \
-	VPXOR   Y0, Y3, Y3;                                                       \
-	VPSHUFB c48, Y3, Y3;                                                      \
-	VPADDQ  Y3, Y2, Y2;                                                       \
-	VPXOR   Y2, Y1, Y1;                                                       \
-	VPADDQ  Y1, Y1, t;                                                        \
-	VPSRLQ  $63, Y1, Y1;                                                      \
-	VPXOR   t, Y1, Y1;                                                        \
-	BYTE    $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 \ // VPERMQ 0x39, Y3, Y3
-	BYTE    $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e \ // VPERMQ 0x4e, Y2, Y2
-	BYTE    $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 \ // VPERMQ 0x93, Y1, Y1
+#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
+#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
+#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
+#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
+#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
 
-// load msg into Y12, Y13, Y14, Y15
-#define LOAD_MSG_AVX2(src, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) \
-	MOVQ        i0*8(src), X12;      \
-	PINSRQ      $1, i1*8(src), X12;  \
-	MOVQ        i2*8(src), X11;      \
-	PINSRQ      $1, i3*8(src), X11;  \
-	VINSERTI128 $1, X11, Y12, Y12;   \
-	MOVQ        i4*8(src), X13;      \
-	PINSRQ      $1, i5*8(src), X13;  \
-	MOVQ        i6*8(src), X11;      \
-	PINSRQ      $1, i7*8(src), X11;  \
-	VINSERTI128 $1, X11, Y13, Y13;   \
-	MOVQ        i8*8(src), X14;      \
-	PINSRQ      $1, i9*8(src), X14;  \
-	MOVQ        i10*8(src), X11;     \
-	PINSRQ      $1, i11*8(src), X11; \
+#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
+	VPADDQ  m0, Y0, Y0;   \
+	VPADDQ  Y1, Y0, Y0;   \
+	VPXOR   Y0, Y3, Y3;   \
+	VPSHUFD $-79, Y3, Y3; \
+	VPADDQ  Y3, Y2, Y2;   \
+	VPXOR   Y2, Y1, Y1;   \
+	VPSHUFB c40, Y1, Y1;  \
+	VPADDQ  m1, Y0, Y0;   \
+	VPADDQ  Y1, Y0, Y0;   \
+	VPXOR   Y0, Y3, Y3;   \
+	VPSHUFB c48, Y3, Y3;  \
+	VPADDQ  Y3, Y2, Y2;   \
+	VPXOR   Y2, Y1, Y1;   \
+	VPADDQ  Y1, Y1, t;    \
+	VPSRLQ  $63, Y1, Y1;  \
+	VPXOR   t, Y1, Y1;    \
+	VPERMQ_0x39_Y1_Y1;    \
+	VPERMQ_0x4E_Y2_Y2;    \
+	VPERMQ_0x93_Y3_Y3;    \
+	VPADDQ  m2, Y0, Y0;   \
+	VPADDQ  Y1, Y0, Y0;   \
+	VPXOR   Y0, Y3, Y3;   \
+	VPSHUFD $-79, Y3, Y3; \
+	VPADDQ  Y3, Y2, Y2;   \
+	VPXOR   Y2, Y1, Y1;   \
+	VPSHUFB c40, Y1, Y1;  \
+	VPADDQ  m3, Y0, Y0;   \
+	VPADDQ  Y1, Y0, Y0;   \
+	VPXOR   Y0, Y3, Y3;   \
+	VPSHUFB c48, Y3, Y3;  \
+	VPADDQ  Y3, Y2, Y2;   \
+	VPXOR   Y2, Y1, Y1;   \
+	VPADDQ  Y1, Y1, t;    \
+	VPSRLQ  $63, Y1, Y1;  \
+	VPXOR   t, Y1, Y1;    \
+	VPERMQ_0x39_Y3_Y3;    \
+	VPERMQ_0x4E_Y2_Y2;    \
+	VPERMQ_0x93_Y1_Y1
+
+#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
+#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
+#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
+#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
+#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
+
+#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
+#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
+#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
+#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
+#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
+
+#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
+#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
+#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
+#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
+#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
+
+#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
+#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
+#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
+#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
+#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
+
+#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
+#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
+
+// load msg: Y12 = (i0, i1, i2, i3)
+// i0, i1, i2, i3 must not be 0
+#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
+	VMOVQ_SI_X12(i0*8);           \
+	VMOVQ_SI_X11(i2*8);           \
+	VPINSRQ_1_SI_X12(i1*8);       \
+	VPINSRQ_1_SI_X11(i3*8);       \
+	VINSERTI128 $1, X11, Y12, Y12
+
+// load msg: Y13 = (i0, i1, i2, i3)
+// i0, i1, i2, i3 must not be 0
+#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
+	VMOVQ_SI_X13(i0*8);           \
+	VMOVQ_SI_X11(i2*8);           \
+	VPINSRQ_1_SI_X13(i1*8);       \
+	VPINSRQ_1_SI_X11(i3*8);       \
+	VINSERTI128 $1, X11, Y13, Y13
+
+// load msg: Y14 = (i0, i1, i2, i3)
+// i0, i1, i2, i3 must not be 0
+#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
+	VMOVQ_SI_X14(i0*8);           \
+	VMOVQ_SI_X11(i2*8);           \
+	VPINSRQ_1_SI_X14(i1*8);       \
+	VPINSRQ_1_SI_X11(i3*8);       \
+	VINSERTI128 $1, X11, Y14, Y14
+
+// load msg: Y15 = (i0, i1, i2, i3)
+// i0, i1, i2, i3 must not be 0
+#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
+	VMOVQ_SI_X15(i0*8);           \
+	VMOVQ_SI_X11(i2*8);           \
+	VPINSRQ_1_SI_X15(i1*8);       \
+	VPINSRQ_1_SI_X11(i3*8);       \
+	VINSERTI128 $1, X11, Y15, Y15
+
+#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
+	VMOVQ_SI_X12_0;                   \
+	VMOVQ_SI_X11(4*8);                \
+	VPINSRQ_1_SI_X12(2*8);            \
+	VPINSRQ_1_SI_X11(6*8);            \
+	VINSERTI128 $1, X11, Y12, Y12;    \
+	LOAD_MSG_AVX2_Y13(1, 3, 5, 7);    \
+	LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
+	LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
+
+#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
+	LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
+	LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
+	VMOVQ_SI_X11(11*8);              \
+	VPSHUFD     $0x4E, 0*8(SI), X14; \
+	VPINSRQ_1_SI_X11(5*8);           \
 	VINSERTI128 $1, X11, Y14, Y14;   \
-	MOVQ        i12*8(src), X15;     \
-	PINSRQ      $1, i13*8(src), X15; \
-	MOVQ        i14*8(src), X11;     \
-	PINSRQ      $1, i15*8(src), X11; \
+	LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
+
+#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
+	VMOVQ_SI_X11(5*8);              \
+	VMOVDQU     11*8(SI), X12;      \
+	VPINSRQ_1_SI_X11(15*8);         \
+	VINSERTI128 $1, X11, Y12, Y12;  \
+	VMOVQ_SI_X13(8*8);              \
+	VMOVQ_SI_X11(2*8);              \
+	VPINSRQ_1_SI_X13_0;             \
+	VPINSRQ_1_SI_X11(13*8);         \
+	VINSERTI128 $1, X11, Y13, Y13;  \
+	LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
+	LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
+
+#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
+	LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
+	LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
+	LOAD_MSG_AVX2_Y14(2, 5, 4, 15);  \
+	VMOVQ_SI_X15(6*8);               \
+	VMOVQ_SI_X11_0;                  \
+	VPINSRQ_1_SI_X15(10*8);          \
+	VPINSRQ_1_SI_X11(8*8);           \
+	VINSERTI128 $1, X11, Y15, Y15
+
+#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
+	LOAD_MSG_AVX2_Y12(9, 5, 2, 10);  \
+	VMOVQ_SI_X13_0;                  \
+	VMOVQ_SI_X11(4*8);               \
+	VPINSRQ_1_SI_X13(7*8);           \
+	VPINSRQ_1_SI_X11(15*8);          \
+	VINSERTI128 $1, X11, Y13, Y13;   \
+	LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
+	LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
+
+#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
+	VMOVQ_SI_X12(2*8);                \
+	VMOVQ_SI_X11_0;                   \
+	VPINSRQ_1_SI_X12(6*8);            \
+	VPINSRQ_1_SI_X11(8*8);            \
+	VINSERTI128 $1, X11, Y12, Y12;    \
+	LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
+	LOAD_MSG_AVX2_Y14(4, 7, 15, 1);   \
+	LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
+
+#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
+	LOAD_MSG_AVX2_Y12(12, 1, 14, 4);  \
+	LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
+	VMOVQ_SI_X14_0;                   \
+	VPSHUFD     $0x4E, 8*8(SI), X11;  \
+	VPINSRQ_1_SI_X14(6*8);            \
+	VINSERTI128 $1, X11, Y14, Y14;    \
+	LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
+
+#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
+	LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
+	LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
+	LOAD_MSG_AVX2_Y14(5, 15, 8, 2);  \
+	VMOVQ_SI_X15_0;                  \
+	VMOVQ_SI_X11(6*8);               \
+	VPINSRQ_1_SI_X15(4*8);           \
+	VPINSRQ_1_SI_X11(10*8);          \
+	VINSERTI128 $1, X11, Y15, Y15
+
+#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
+	VMOVQ_SI_X12(6*8);              \
+	VMOVQ_SI_X11(11*8);             \
+	VPINSRQ_1_SI_X12(14*8);         \
+	VPINSRQ_1_SI_X11_0;             \
+	VINSERTI128 $1, X11, Y12, Y12;  \
+	LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
+	VMOVQ_SI_X11(1*8);              \
+	VMOVDQU     12*8(SI), X14;      \
+	VPINSRQ_1_SI_X11(10*8);         \
+	VINSERTI128 $1, X11, Y14, Y14;  \
+	VMOVQ_SI_X15(2*8);              \
+	VMOVDQU     4*8(SI), X11;       \
+	VPINSRQ_1_SI_X15(7*8);          \
+	VINSERTI128 $1, X11, Y15, Y15
+
+#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
+	LOAD_MSG_AVX2_Y12(10, 8, 7, 1);  \
+	VMOVQ_SI_X13(2*8);               \
+	VPSHUFD     $0x4E, 5*8(SI), X11; \
+	VPINSRQ_1_SI_X13(4*8);           \
+	VINSERTI128 $1, X11, Y13, Y13;   \
+	LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
+	VMOVQ_SI_X15(11*8);              \
+	VMOVQ_SI_X11(12*8);              \
+	VPINSRQ_1_SI_X15(14*8);          \
+	VPINSRQ_1_SI_X11_0;              \
 	VINSERTI128 $1, X11, Y15, Y15
 
 // func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
@@ -162,34 +317,34 @@
 	VMOVDQA Y6, Y2
 	VPXOR   0(SP), Y7, Y3
 
-	LOAD_MSG_AVX2(SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15)
+	LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
 	VMOVDQA Y12, 32(SP)
 	VMOVDQA Y13, 64(SP)
 	VMOVDQA Y14, 96(SP)
 	VMOVDQA Y15, 128(SP)
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3)
+	LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
 	VMOVDQA Y12, 160(SP)
 	VMOVDQA Y13, 192(SP)
 	VMOVDQA Y14, 224(SP)
 	VMOVDQA Y15, 256(SP)
 
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4)
+	LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8)
+	LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13)
+	LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9)
+	LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11)
+	LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10)
+	LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5)
+	LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-	LOAD_MSG_AVX2(SI, 10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0)
+	LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
 	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
 
 	ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
@@ -209,56 +364,55 @@
 
 	VMOVDQU Y8, 0(AX)
 	VMOVDQU Y9, 32(AX)
+	VZEROUPPER
 
 	MOVQ DX, SP
 	RET
 
-// unfortunately the BYTE representation of VPUNPCKLQDQ and VPUNPCKHQDQ must be used
-#define VPUNPCKLQDQ_X8_X8_X10 BYTE $0xC4; BYTE $0x41; BYTE $0x39; BYTE $0x6C; BYTE $0xD0
-#define VPUNPCKHQDQ_X7_X10_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF2
-#define VPUNPCKLQDQ_X7_X7_X10 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xD7
-#define VPUNPCKHQDQ_X8_X10_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x39; BYTE $0x6D; BYTE $0xFA
-#define VPUNPCKLQDQ_X3_X3_X10 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xD3
-#define VPUNPCKHQDQ_X2_X10_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD2
-#define VPUNPCKLQDQ_X9_X9_X10 BYTE $0xC4; BYTE $0x41; BYTE $0x31; BYTE $0x6C; BYTE $0xD1
-#define VPUNPCKHQDQ_X3_X10_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDA
-#define VPUNPCKLQDQ_X2_X2_X10 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xD2
-#define VPUNPCKHQDQ_X3_X10_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD2
-#define VPUNPCKHQDQ_X8_X10_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x39; BYTE $0x6D; BYTE $0xDA
-#define VPUNPCKHQDQ_X6_X10_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF2
-#define VPUNPCKHQDQ_X7_X10_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFA
+#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
+#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
+#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
+#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
+#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
 
-// shuffle X2 and X6 using the temp registers X8, X9, X10
+#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
+#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
+#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
+#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
+#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
+#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
+#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
+#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
+
 #define SHUFFLE_AVX() \
-	VMOVDQA X4, X9;        \
-	VMOVDQA X5, X4;        \
-	VMOVDQA X9, X5;        \
-	VMOVDQA X6, X8;        \
-	VPUNPCKLQDQ_X8_X8_X10; \
-	VPUNPCKHQDQ_X7_X10_X6; \
-	VPUNPCKLQDQ_X7_X7_X10; \
-	VPUNPCKHQDQ_X8_X10_X7; \
-	VPUNPCKLQDQ_X3_X3_X10; \
-	VMOVDQA X2, X9;        \
-	VPUNPCKHQDQ_X2_X10_X2; \
-	VPUNPCKLQDQ_X9_X9_X10; \
-	VPUNPCKHQDQ_X3_X10_X3; \
+	VMOVDQA X6, X13;         \
+	VMOVDQA X2, X14;         \
+	VMOVDQA X4, X6;          \
+	VPUNPCKLQDQ_X13_X13_X15; \
+	VMOVDQA X5, X4;          \
+	VMOVDQA X6, X5;          \
+	VPUNPCKHQDQ_X15_X7_X6;   \
+	VPUNPCKLQDQ_X7_X7_X15;   \
+	VPUNPCKHQDQ_X15_X13_X7;  \
+	VPUNPCKLQDQ_X3_X3_X15;   \
+	VPUNPCKHQDQ_X15_X2_X2;   \
+	VPUNPCKLQDQ_X14_X14_X15; \
+	VPUNPCKHQDQ_X15_X3_X3;   \
 
-// inverse shuffle X2 and X6 using the temp registers X8, X9, X10
 #define SHUFFLE_AVX_INV() \
-	VMOVDQA X4, X9;        \
-	VMOVDQA X5, X4;        \
-	VMOVDQA X9, X5;        \
-	VMOVDQA X2, X8;        \
-	VPUNPCKLQDQ_X2_X2_X10; \
-	VPUNPCKHQDQ_X3_X10_X2; \
-	VPUNPCKLQDQ_X3_X3_X10; \
-	VPUNPCKHQDQ_X8_X10_X3; \
-	VPUNPCKLQDQ_X7_X7_X10; \
-	VMOVDQA X6, X9;        \
-	VPUNPCKHQDQ_X6_X10_X6; \
-	VPUNPCKLQDQ_X9_X9_X10; \
-	VPUNPCKHQDQ_X7_X10_X7; \
+	VMOVDQA X2, X13;         \
+	VMOVDQA X4, X14;         \
+	VPUNPCKLQDQ_X2_X2_X15;   \
+	VMOVDQA X5, X4;          \
+	VPUNPCKHQDQ_X15_X3_X2;   \
+	VMOVDQA X14, X5;         \
+	VPUNPCKLQDQ_X3_X3_X15;   \
+	VMOVDQA X6, X14;         \
+	VPUNPCKHQDQ_X15_X13_X3;  \
+	VPUNPCKLQDQ_X7_X7_X15;   \
+	VPUNPCKHQDQ_X15_X6_X6;   \
+	VPUNPCKLQDQ_X14_X14_X15; \
+	VPUNPCKHQDQ_X15_X7_X7;   \
 
 #define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
 	VPADDQ  m0, v0, v0;   \
@@ -294,28 +448,133 @@
 	VPSRLQ  $63, v3, v3;  \
 	VPXOR   t0, v3, v3
 
-// unfortunately the BYTE representation of VPINSRQ must be used
-#define VPINSRQ_1_R10_X8_X8 BYTE $0xC4; BYTE $0x43; BYTE $0xB9; BYTE $0x22; BYTE $0xC2; BYTE $0x01
-#define VPINSRQ_1_R11_X9_X9 BYTE $0xC4; BYTE $0x43; BYTE $0xB1; BYTE $0x22; BYTE $0xCB; BYTE $0x01
-#define VPINSRQ_1_R12_X10_X10 BYTE $0xC4; BYTE $0x43; BYTE $0xA9; BYTE $0x22; BYTE $0xD4; BYTE $0x01
-#define VPINSRQ_1_R13_X11_X11 BYTE $0xC4; BYTE $0x43; BYTE $0xA1; BYTE $0x22; BYTE $0xDD; BYTE $0x01
+// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
+// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
+#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
+	VMOVQ_SI_X12(i0*8);     \
+	VMOVQ_SI_X13(i2*8);     \
+	VMOVQ_SI_X14(i4*8);     \
+	VMOVQ_SI_X15(i6*8);     \
+	VPINSRQ_1_SI_X12(i1*8); \
+	VPINSRQ_1_SI_X13(i3*8); \
+	VPINSRQ_1_SI_X14(i5*8); \
+	VPINSRQ_1_SI_X15(i7*8)
 
-#define VPINSRQ_1_R9_X8_X8 BYTE $0xC4; BYTE $0x43; BYTE $0xB9; BYTE $0x22; BYTE $0xC1; BYTE $0x01
+// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
+#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
+	VMOVQ_SI_X12_0;        \
+	VMOVQ_SI_X13(4*8);     \
+	VMOVQ_SI_X14(1*8);     \
+	VMOVQ_SI_X15(5*8);     \
+	VPINSRQ_1_SI_X12(2*8); \
+	VPINSRQ_1_SI_X13(6*8); \
+	VPINSRQ_1_SI_X14(3*8); \
+	VPINSRQ_1_SI_X15(7*8)
 
-// load src into X8, X9, X10 and X11 using R10, R11, R12 and R13 for temp registers
-#define LOAD_MSG_AVX(src, i0, i1, i2, i3, i4, i5, i6, i7) \
-	MOVQ i0*8(src), X8;    \
-	MOVQ i1*8(src), R10;   \
-	MOVQ i2*8(src), X9;    \
-	MOVQ i3*8(src), R11;   \
-	MOVQ i4*8(src), X10;   \
-	MOVQ i5*8(src), R12;   \
-	MOVQ i6*8(src), X11;   \
-	MOVQ i7*8(src), R13;   \
-	VPINSRQ_1_R10_X8_X8;   \
-	VPINSRQ_1_R11_X9_X9;   \
-	VPINSRQ_1_R12_X10_X10; \
-	VPINSRQ_1_R13_X11_X11
+// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
+#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
+	VPSHUFD $0x4E, 0*8(SI), X12; \
+	VMOVQ_SI_X13(11*8);          \
+	VMOVQ_SI_X14(12*8);          \
+	VMOVQ_SI_X15(7*8);           \
+	VPINSRQ_1_SI_X13(5*8);       \
+	VPINSRQ_1_SI_X14(2*8);       \
+	VPINSRQ_1_SI_X15(3*8)
+
+// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
+#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
+	VMOVDQU 11*8(SI), X12;  \
+	VMOVQ_SI_X13(5*8);      \
+	VMOVQ_SI_X14(8*8);      \
+	VMOVQ_SI_X15(2*8);      \
+	VPINSRQ_1_SI_X13(15*8); \
+	VPINSRQ_1_SI_X14_0;     \
+	VPINSRQ_1_SI_X15(13*8)
+
+// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
+#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
+	VMOVQ_SI_X12(2*8);      \
+	VMOVQ_SI_X13(4*8);      \
+	VMOVQ_SI_X14(6*8);      \
+	VMOVQ_SI_X15_0;         \
+	VPINSRQ_1_SI_X12(5*8);  \
+	VPINSRQ_1_SI_X13(15*8); \
+	VPINSRQ_1_SI_X14(10*8); \
+	VPINSRQ_1_SI_X15(8*8)
+
+// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
+#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
+	VMOVQ_SI_X12(9*8);      \
+	VMOVQ_SI_X13(2*8);      \
+	VMOVQ_SI_X14_0;         \
+	VMOVQ_SI_X15(4*8);      \
+	VPINSRQ_1_SI_X12(5*8);  \
+	VPINSRQ_1_SI_X13(10*8); \
+	VPINSRQ_1_SI_X14(7*8);  \
+	VPINSRQ_1_SI_X15(15*8)
+
+// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
+#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
+	VMOVQ_SI_X12(2*8);      \
+	VMOVQ_SI_X13_0;         \
+	VMOVQ_SI_X14(12*8);     \
+	VMOVQ_SI_X15(11*8);     \
+	VPINSRQ_1_SI_X12(6*8);  \
+	VPINSRQ_1_SI_X13(8*8);  \
+	VPINSRQ_1_SI_X14(10*8); \
+	VPINSRQ_1_SI_X15(3*8)
+
+// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
+#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
+	MOVQ    0*8(SI), X12;        \
+	VPSHUFD $0x4E, 8*8(SI), X13; \
+	MOVQ    7*8(SI), X14;        \
+	MOVQ    2*8(SI), X15;        \
+	VPINSRQ_1_SI_X12(6*8);       \
+	VPINSRQ_1_SI_X14(3*8);       \
+	VPINSRQ_1_SI_X15(11*8)
+
+// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
+#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
+	MOVQ 6*8(SI), X12;      \
+	MOVQ 11*8(SI), X13;     \
+	MOVQ 15*8(SI), X14;     \
+	MOVQ 3*8(SI), X15;      \
+	VPINSRQ_1_SI_X12(14*8); \
+	VPINSRQ_1_SI_X13_0;     \
+	VPINSRQ_1_SI_X14(9*8);  \
+	VPINSRQ_1_SI_X15(8*8)
+
+// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
+#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
+	MOVQ 5*8(SI), X12;      \
+	MOVQ 8*8(SI), X13;      \
+	MOVQ 0*8(SI), X14;      \
+	MOVQ 6*8(SI), X15;      \
+	VPINSRQ_1_SI_X12(15*8); \
+	VPINSRQ_1_SI_X13(2*8);  \
+	VPINSRQ_1_SI_X14(4*8);  \
+	VPINSRQ_1_SI_X15(10*8)
+
+// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
+#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
+	VMOVDQU 12*8(SI), X12;  \
+	MOVQ    1*8(SI), X13;   \
+	MOVQ    2*8(SI), X14;   \
+	VPINSRQ_1_SI_X13(10*8); \
+	VPINSRQ_1_SI_X14(7*8);  \
+	VMOVDQU 4*8(SI), X15
+
+// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
+#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
+	MOVQ 15*8(SI), X12;     \
+	MOVQ 3*8(SI), X13;      \
+	MOVQ 11*8(SI), X14;     \
+	MOVQ 12*8(SI), X15;     \
+	VPINSRQ_1_SI_X12(9*8);  \
+	VPINSRQ_1_SI_X13(13*8); \
+	VPINSRQ_1_SI_X14(14*8); \
+	VPINSRQ_1_SI_X15_0
 
 // func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
 TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
@@ -331,15 +590,17 @@
 	ANDQ $~15, R9
 	MOVQ R9, SP
 
-	MOVOU ·AVX_c40<>(SB), X13
-	MOVOU ·AVX_c48<>(SB), X14
+	VMOVDQU ·AVX_c40<>(SB), X0
+	VMOVDQU ·AVX_c48<>(SB), X1
+	VMOVDQA X0, X8
+	VMOVDQA X1, X9
 
 	VMOVDQU ·AVX_iv3<>(SB), X0
 	VMOVDQA X0, 0(SP)
 	XORQ    CX, 0(SP)          // 0(SP) = ·AVX_iv3 ^ (CX || 0)
 
-	VMOVDQU 0(AX), X12
-	VMOVDQU 16(AX), X15
+	VMOVDQU 0(AX), X10
+	VMOVDQU 16(AX), X11
 	VMOVDQU 32(AX), X2
 	VMOVDQU 48(AX), X3
 
@@ -353,124 +614,124 @@
 	INCQ R9
 
 noinc:
-	MOVQ   R8, X8
-	VPINSRQ_1_R9_X8_X8
+	VMOVQ_R8_X15
+	VPINSRQ_1_R9_X15
 
-	VMOVDQA X12, X0
-	VMOVDQA X15, X1
+	VMOVDQA X10, X0
+	VMOVDQA X11, X1
 	VMOVDQU ·AVX_iv0<>(SB), X4
 	VMOVDQU ·AVX_iv1<>(SB), X5
 	VMOVDQU ·AVX_iv2<>(SB), X6
 
-	VPXOR   X8, X6, X6
+	VPXOR   X15, X6, X6
 	VMOVDQA 0(SP), X7
 
-	LOAD_MSG_AVX(SI, 0, 2, 4, 6, 1, 3, 5, 7)
-	VMOVDQA X8, 16(SP)
-	VMOVDQA X9, 32(SP)
-	VMOVDQA X10, 48(SP)
-	VMOVDQA X11, 64(SP)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
+	VMOVDQA X12, 16(SP)
+	VMOVDQA X13, 32(SP)
+	VMOVDQA X14, 48(SP)
+	VMOVDQA X15, 64(SP)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 8, 10, 12, 14, 9, 11, 13, 15)
-	VMOVDQA X8, 80(SP)
-	VMOVDQA X9, 96(SP)
-	VMOVDQA X10, 112(SP)
-	VMOVDQA X11, 128(SP)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
+	VMOVDQA X12, 80(SP)
+	VMOVDQA X13, 96(SP)
+	VMOVDQA X14, 112(SP)
+	VMOVDQA X15, 128(SP)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 14, 4, 9, 13, 10, 8, 15, 6)
-	VMOVDQA X8, 144(SP)
-	VMOVDQA X9, 160(SP)
-	VMOVDQA X10, 176(SP)
-	VMOVDQA X11, 192(SP)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
+	VMOVDQA X12, 144(SP)
+	VMOVDQA X13, 160(SP)
+	VMOVDQA X14, 176(SP)
+	VMOVDQA X15, 192(SP)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 1, 0, 11, 5, 12, 2, 7, 3)
-	VMOVDQA X8, 208(SP)
-	VMOVDQA X9, 224(SP)
-	VMOVDQA X10, 240(SP)
-	VMOVDQA X11, 256(SP)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
+	VMOVDQA X12, 208(SP)
+	VMOVDQA X13, 224(SP)
+	VMOVDQA X14, 240(SP)
+	VMOVDQA X15, 256(SP)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 11, 12, 5, 15, 8, 0, 2, 13)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 10, 3, 7, 9, 14, 6, 1, 4)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 7, 3, 13, 11, 9, 1, 12, 14)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 2, 5, 4, 15, 6, 10, 0, 8)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 9, 5, 2, 10, 0, 7, 4, 15)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 14, 11, 6, 3, 1, 12, 8, 13)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 2, 6, 0, 8, 12, 10, 11, 3)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 4, 7, 15, 1, 13, 5, 14, 9)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 12, 1, 14, 4, 5, 15, 13, 10)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 0, 6, 9, 8, 7, 3, 2, 11)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 13, 7, 12, 3, 11, 14, 1, 9)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 5, 15, 8, 2, 0, 4, 6, 10)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 6, 14, 11, 0, 15, 9, 3, 8)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 12, 13, 1, 10, 2, 7, 4, 5)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	LOAD_MSG_AVX(SI, 10, 8, 7, 1, 2, 4, 6, 5)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX()
-	LOAD_MSG_AVX(SI, 15, 9, 3, 13, 11, 14, 12, 0)
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
+	LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9)
 	SHUFFLE_AVX()
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9)
 	SHUFFLE_AVX()
-	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14)
+	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9)
 	SHUFFLE_AVX_INV()
 
-	VMOVDQU 32(AX), X10
-	VMOVDQU 48(AX), X11
-	VPXOR   X0, X12, X12
-	VPXOR   X1, X15, X15
-	VPXOR   X2, X10, X10
-	VPXOR   X3, X11, X11
-	VPXOR   X4, X12, X12
-	VPXOR   X5, X15, X15
-	VPXOR   X6, X10, X2
-	VPXOR   X7, X11, X3
+	VMOVDQU 32(AX), X14
+	VMOVDQU 48(AX), X15
+	VPXOR   X0, X10, X10
+	VPXOR   X1, X11, X11
+	VPXOR   X2, X14, X14
+	VPXOR   X3, X15, X15
+	VPXOR   X4, X10, X10
+	VPXOR   X5, X11, X11
+	VPXOR   X6, X14, X2
+	VPXOR   X7, X15, X3
 	VMOVDQU X2, 32(AX)
 	VMOVDQU X3, 48(AX)
 
@@ -478,12 +739,11 @@
 	SUBQ $128, DI
 	JNE  loop
 
-	VMOVDQU X12, 0(AX)
-	VMOVDQU X15, 16(AX)
+	VMOVDQU X10, 0(AX)
+	VMOVDQU X11, 16(AX)
 
 	MOVQ R8, 0(BX)
 	MOVQ R9, 8(BX)
-
 	VZEROUPPER
 
 	MOVQ BP, SP