# qhasm: int64 input_0
# qhasm: int64 input_1
# qhasm: int64 input_2
# qhasm: int64 input_3
# qhasm: int64 input_4
# qhasm: int64 input_5
# qhasm: stack64 input_6
# qhasm: stack64 input_7
# qhasm: int64 caller_r11
# qhasm: int64 caller_r12
# qhasm: int64 caller_r13
# qhasm: int64 caller_r14
# qhasm: int64 caller_r15
# qhasm: int64 caller_rbx
# qhasm: int64 caller_rbp
# qhasm: stack64 r11_stack
# qhasm: stack64 r12_stack
# qhasm: stack64 r13_stack
# qhasm: stack64 r14_stack
# qhasm: stack64 r15_stack
# qhasm: stack64 rbx_stack
# qhasm: stack64 rbp_stack
# qhasm: int64 statebytes
# qhasm: stack64 statebytes_stack
# qhasm: int64 in
# qhasm: stack64 in_stack
# qhasm: int64 inlen
# qhasm: stack64 inlen_stack
# qhasm: int64 constants
# qhasm: stack64 constants_stack
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r4
# qhasm: int64 r5
# qhasm: int64 r6
# qhasm: int64 r7
# qhasm: int64 i
# qhasm: stack256 state0123
# qhasm: stack256 state4567
# qhasm: reg256 X0
# qhasm: reg256 X4
# qhasm: reg256 X8
# qhasm: reg256 X12
# qhasm: reg256 X1
# qhasm: reg256 X5
# qhasm: reg256 X9
# qhasm: reg256 X13
# qhasm: reg256 bigendian64
# qhasm: reg256 D0
# qhasm: reg256 D4
# qhasm: reg256 D8
# qhasm: reg256 D12
# qhasm: reg256 W0
# qhasm: reg256 W2
# qhasm: reg256 W4
# qhasm: reg256 W6
# qhasm: reg256 W8
# qhasm: reg256 W10
# qhasm: reg256 W12
# qhasm: reg256 W14
# qhasm: stack1280 w
# qhasm: stack256 wc0123
# qhasm: stack256 wc4567
# qhasm: stack256 wc891011
# qhasm: stack256 wc12131415
# qhasm: int64 r0andr1
# qhasm: int64 r2andr3
# qhasm: int64 r4andr5
# qhasm: int64 r6andr7
# qhasm: int64 ch0
# qhasm: int64 ch1
# qhasm: int64 ch2
# qhasm: int64 ch3
# qhasm: int64 ch4
# qhasm: int64 ch5
# qhasm: int64 ch6
# qhasm: int64 ch7
# qhasm: int64 maj0
# qhasm: int64 maj1
# qhasm: int64 maj2
# qhasm: int64 maj3
# qhasm: int64 maj4
# qhasm: int64 maj5
# qhasm: int64 maj6
# qhasm: int64 maj7
# qhasm: int64 r0Sigma0
# qhasm: int64 r1Sigma0
# qhasm: int64 r2Sigma0
# qhasm: int64 r3Sigma0
# qhasm: int64 r4Sigma0
# qhasm: int64 r5Sigma0
# qhasm: int64 r6Sigma0
# qhasm: int64 r7Sigma0
# qhasm: int64 r0Sigma1
# qhasm: int64 r1Sigma1
# qhasm: int64 r2Sigma1
# qhasm: int64 r3Sigma1
# qhasm: int64 r4Sigma1
# qhasm: int64 r5Sigma1
# qhasm: int64 r6Sigma1
# qhasm: int64 r7Sigma1
# qhasm: int64 r018
# qhasm: int64 r118
# qhasm: int64 r218
# qhasm: int64 r318
# qhasm: int64 r418
# qhasm: int64 r518
# qhasm: int64 r618
# qhasm: int64 r718
# qhasm: int64 r041
# qhasm: int64 r141
# qhasm: int64 r241
# qhasm: int64 r341
# qhasm: int64 r441
# qhasm: int64 r541
# qhasm: int64 r641
# qhasm: int64 r741
# qhasm: int64 r034
# qhasm: int64 r134
# qhasm: int64 r234
# qhasm: int64 r334
# qhasm: int64 r434
# qhasm: int64 r534
# qhasm: int64 r634
# qhasm: int64 r734
# qhasm: int64 r039
# qhasm: int64 r139
# qhasm: int64 r239
# qhasm: int64 r339
# qhasm: int64 r439
# qhasm: int64 r539
# qhasm: int64 r639
# qhasm: int64 r739
# qhasm: reg256 X1right1
# qhasm: reg256 X1left63
# qhasm: reg256 X1right8
# qhasm: reg256 X1left56
# qhasm: reg256 X1right7
# qhasm: reg256 X1sigma0
# qhasm: reg256 X5right1
# qhasm: reg256 X5left63
# qhasm: reg256 X5right8
# qhasm: reg256 X5left56
# qhasm: reg256 X5right7
# qhasm: reg256 X5sigma0
# qhasm: reg256 X9right1
# qhasm: reg256 X9left63
# qhasm: reg256 X9right8
# qhasm: reg256 X9left56
# qhasm: reg256 X9right7
# qhasm: reg256 X9sigma0
# qhasm: reg256 X13right1
# qhasm: reg256 X13left63
# qhasm: reg256 X13right8
# qhasm: reg256 X13left56
# qhasm: reg256 X13right7
# qhasm: reg256 X13sigma0
# qhasm: reg256 W0right19
# qhasm: reg256 W0right61
# qhasm: reg256 W0right6
# qhasm: reg256 W0left45
# qhasm: reg256 W0left3
# qhasm: reg256 W0sigma1
# qhasm: reg256 W2right19
# qhasm: reg256 W2right61
# qhasm: reg256 W2right6
# qhasm: reg256 W2left45
# qhasm: reg256 W2left3
# qhasm: reg256 W2sigma1
# qhasm: reg256 W4right19
# qhasm: reg256 W4right61
# qhasm: reg256 W4right6
# qhasm: reg256 W4left45
# qhasm: reg256 W4left3
# qhasm: reg256 W4sigma1
# qhasm: reg256 W6right19
# qhasm: reg256 W6right61
# qhasm: reg256 W6right6
# qhasm: reg256 W6left45
# qhasm: reg256 W6left3
# qhasm: reg256 W6sigma1
# qhasm: reg256 W8right19
# qhasm: reg256 W8right61
# qhasm: reg256 W8right6
# qhasm: reg256 W8left45
# qhasm: reg256 W8left3
# qhasm: reg256 W8sigma1
# qhasm: reg256 W10right19
# qhasm: reg256 W10right61
# qhasm: reg256 W10right6
# qhasm: reg256 W10left45
# qhasm: reg256 W10left3
# qhasm: reg256 W10sigma1
# qhasm: reg256 W12right19
# qhasm: reg256 W12right61
# qhasm: reg256 W12right6
# qhasm: reg256 W12left45
# qhasm: reg256 W12left3
# qhasm: reg256 W12sigma1
# qhasm: reg256 W14right19
# qhasm: reg256 W14right61
# qhasm: reg256 W14right6
# qhasm: reg256 W14left45
# qhasm: reg256 W14left3
# qhasm: reg256 W14sigma1
# qhasm: enter CRYPTO_NAMESPACE(inner)
.p2align 7
.global _CRYPTO_NAMESPACE(inner)
.global CRYPTO_NAMESPACE(inner)
_CRYPTO_NAMESPACE(inner):
CRYPTO_NAMESPACE(inner):
mov %rsp,%r11
and $511,%r11
add $416,%r11
sub %r11,%rsp
# qhasm: constants = input_3
mov %rcx,%r8
# qhasm: bigendian64 = mem256[input_3+640]
vmovupd 640(%rcx),%ymm0
# qhasm: X0 = mem256[input_0+0]
vmovupd 0(%rdi),%ymm1
# qhasm: statebytes = input_0
mov %rdi,%rcx
# qhasm: X4 = mem256[input_0+32]
vmovupd 32(%rdi),%ymm2
# qhasm: 2x 16x X0 = X0[bigendian64]
vpshufb %ymm0,%ymm1,%ymm1
# qhasm: 2x 16x X4 = X4[bigendian64]
vpshufb %ymm0,%ymm2,%ymm2
# qhasm: state0123 = X0
vmovapd %ymm1,0(%rsp)
# qhasm: r11_stack = caller_r11
movq %r11,320(%rsp)
# qhasm: state4567 = X4
vmovapd %ymm2,32(%rsp)
# qhasm: r13_stack = caller_r13
movq %r13,328(%rsp)
# qhasm: r12_stack = caller_r12
movq %r12,336(%rsp)
# qhasm: r14_stack = caller_r14
movq %r14,344(%rsp)
# qhasm: rbx_stack = caller_rbx
movq %rbx,352(%rsp)
# qhasm: r15_stack = caller_r15
movq %r15,360(%rsp)
# qhasm: rbp_stack = caller_rbp
movq %rbp,368(%rsp)
# qhasm: inlen_stack = input_2
movq %rdx,376(%rsp)
# qhasm: in = input_1
mov %rsi,%rdi
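# Editor's note: the prologue above subtracts (%rsp & 511) + 416 from the
# stack pointer, so the 416-byte frame is 32-byte aligned for the vmovapd
# spills; the adjustment is kept in %r11 (saved in r11_stack) so the
# epilogue can undo it. Under the SysV ABI the inputs arrive as
# input_0=%rdi (statebytes), input_1=%rsi (in), input_2=%rdx (inlen),
# input_3=%rcx (constants; the byte-swap mask sits at offset 640, i.e.
# after the 80 round constants). A hedged C sketch of the contract this
# routine appears to implement (identifier names here are illustrative,
# not from the source):
#
#   #include <stdint.h>
#   uint64_t inner(uint8_t statebytes[64], const uint8_t *in,
#                  uint64_t inlen, const void *constants);
#   /* hashes whole 128-byte SHA-512 blocks of in into the state and
#      returns the number of unprocessed tail bytes */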
# qhasm: statebytes_stack = statebytes
movq %rcx,384(%rsp)
# qhasm: r0 = state0123[0]
movq 0(%rsp),%rsi
# qhasm: r2 = state0123[2]
movq 16(%rsp),%rdx
# qhasm: constants_stack = constants
movq %r8,392(%rsp)
# qhasm: r1 = state0123[1]
movq 8(%rsp),%rcx
# qhasm: r3 = state0123[3]
movq 24(%rsp),%r9
# qhasm: r5 = state4567[1]
movq 40(%rsp),%r10
# qhasm: r4 = state4567[0]
movq 32(%rsp),%r11
# qhasm: r6 = state4567[2]
movq 48(%rsp),%r12
# qhasm: r7 = state4567[3]
movq 56(%rsp),%r13
# qhasm: new w
# qhasm: nop9
.byte 0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
# qhasm: nop9
.byte 0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
# qhasm: nop2
.byte 0x66,0x90
# qhasm: outerloop:
._outerloop:
# qhasm: X0 = mem256[in + 0]
vmovupd 0(%rdi),%ymm1
# qhasm: 2x 16x X0 = X0[bigendian64]
vpshufb %ymm0,%ymm1,%ymm1
# qhasm: ch7 = r6
mov %r12,%rax
# qhasm: r4Sigma1 = r4>>>14
rorx $14,%r11,%r14
# qhasm: ch7 ^= r5
# qhasm: 4x D0 = X0 + mem256[constants + 0]
vpaddq 0(%r8),%ymm1,%ymm2
# qhasm: r418 = r4>>>18
rorx $18,%r11,%r15
# qhasm: r4Sigma1 ^= r418
# qhasm: r441 = r4>>>41
rorx $41,%r11,%r15
# qhasm: r4Sigma1 ^= r441
# qhasm: r0Sigma0 = r0>>>28
rorx $28,%rsi,%r15
# qhasm: ch7 ^= r6
# qhasm: r034 = r0>>>34
rorx $34,%rsi,%rbx
# qhasm: r039 = r0>>>39
rorx $39,%rsi,%rbp
# qhasm: inplace state4567[3] = r7
# qhasm: maj6 = r1
mov %rcx,%rax
# qhasm: maj6 ^= r0
# qhasm: r0andr1 = r1
mov %rcx,%rbx
# qhasm: r0andr1 &= r0
# qhasm: maj7 = r2
mov %rdx,%r14
# qhasm: wc0123 = D0
vmovapd %ymm2,64(%rsp)
# qhasm: r7 += wc0123[0]
# qhasm: ch6 = r5
mov %r10,%r15
# qhasm: r3Sigma1 = r3>>>14
rorx $14,%r9,%rbp
# qhasm: ch6 ^= r4
# qhasm: X4 = mem256[in + 32]
vmovupd 32(%rdi),%ymm2
# qhasm: 2x 16x X4 = X4[bigendian64]
vpshufb %ymm0,%ymm2,%ymm2
# qhasm: r7 += maj7
# qhasm: r318 = r3>>>18
rorx $18,%r9,%r14
# qhasm: 4x D4 = X4 + mem256[constants + 32]
vpaddq 32(%r8),%ymm2,%ymm3
# qhasm: ch6 &= r3
# qhasm: r341 = r3>>>41
rorx $41,%r9,%r14
# qhasm: ch6 ^= r5
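# The scalar rounds build SHA-512's big-sigma functions from three rorx
# (rotate-right) results each: amounts 14/18/41 for Sigma1 and 28/34/39
# for Sigma0, xored together, as in FIPS 180-4. A minimal C sketch of the
# same functions (helper names are mine, not the source's):
#
#   #include <stdint.h>
#   static inline uint64_t ror64(uint64_t x, int n) {
#       return (x >> n) | (x << (64 - n));
#   }
#   static inline uint64_t Sigma1(uint64_t e) {  /* r4Sigma1 etc. */
#       return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
#   }
#   static inline uint64_t Sigma0(uint64_t a) {  /* r0Sigma0 etc. */
#       return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
#   }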
# qhasm: r7Sigma0 = r7>>>28
rorx $28,%r13,%rbx
# qhasm: inplace state4567[1] = r5
# qhasm: r734 = r7>>>34
rorx $34,%r13,%r14
# qhasm: inplace state0123[2] = r2
# qhasm: r739 = r7>>>39
rorx $39,%r13,%r15
# qhasm: r7Sigma0 ^= r734
# qhasm: ch5 = r4
mov %r11,%r14
# qhasm: ch5 ^= r3
# qhasm: r2Sigma1 = r2>>>14
rorx $14,%rdx,%rax
# qhasm: r6 += r7Sigma0
# qhasm: r218 = r2>>>18
rorx $18,%rdx,%r15
# qhasm: mem256[&w + 32] = X4
# qhasm: r241 = r2>>>41
rorx $41,%rdx,%r15
# qhasm: maj4 = r7
mov %r13,%rbx
# qhasm: maj4 ^= r6
# qhasm: r6Sigma0 = r6>>>28
rorx $28,%r12,%rbp
# qhasm: wc4567 = D4
vmovapd %ymm3,96(%rsp)
# qhasm: r2Sigma1 ^= r241
# qhasm: r634 = r6>>>34
rorx $34,%r12,%r14
# qhasm: in_stack = in
movq %rdi,400(%rsp)
# qhasm: r6Sigma0 ^= r634
# qhasm: r639 = r6>>>39
rorx $39,%r12,%r14
# qhasm: r6andr7 = r7
mov %r13,%r15
# qhasm: r6andr7 &= r6
# qhasm: maj5 = r0
mov %rsi,%r14
# qhasm: inplace state0123[1] = r1
# qhasm: ch4 = r3
mov %r9,%rax
# qhasm: r1Sigma1 = r1>>>14
rorx $14,%rcx,%rbp
# qhasm: ch4 ^= r2
# qhasm: r118 = r1>>>18
rorx $18,%rcx,%r14
# qhasm: inplace state0123[0] = r0
# qhasm: r141 = r1>>>41
rorx $41,%rcx,%r14
# qhasm: X8 = mem256[in + 64]
vmovupd 64(%rdi),%ymm3
# qhasm: r1Sigma1 ^= r141
# qhasm: r5Sigma0 = r5>>>28
rorx $28,%r10,%r14
# qhasm: maj4 ^= r6andr7
# qhasm: r534 = r5>>>34
rorx $34,%r10,%rax
# qhasm: r4 += r1Sigma1
# qhasm: X12 = mem256[in + 96]
vmovupd 96(%rdi),%ymm4
# qhasm: r0 += r4
# qhasm: r539 = r5>>>39
rorx $39,%r10,%rdi
# qhasm: r4 += maj4
# qhasm: r0Sigma1 = r0>>>14
rorx $14,%rsi,%rdi
# qhasm: r4 += r5Sigma0
# qhasm: ch3 = r2
mov %rdx,%rax
# qhasm: r018 = r0>>>18
rorx $18,%rsi,%r14
# qhasm: ch3 ^= r1
# qhasm: 2x 16x X8 = X8[bigendian64]
vpshufb %ymm0,%ymm3,%ymm3
# qhasm: r0Sigma1 ^= r018
# qhasm: r041 = r0>>>41
rorx $41,%rsi,%r14
# qhasm: r4Sigma0 = r4>>>28
rorx $28,%r11,%r15
# qhasm: 4x D8 = X8 + mem256[constants + 64]
vpaddq 64(%r8),%ymm3,%ymm5
# qhasm: r0Sigma1 ^= r041
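# ch and maj are evaluated branch-free. The ch pattern above (copy g,
# xor f, and e, xor g) computes g ^ (e & (f ^ g)); the maj pattern shares
# one a^b and one a&b term (r0andr1, r2andr3, ...) between two
# consecutive rounds. Equivalent C sketch (macro names are mine):
#
#   #define Ch(e, f, g)  ((g) ^ ((e) & ((f) ^ (g))))
#   #define Maj(a, b, c) (((c) & ((a) ^ (b))) ^ ((a) & (b)))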
# qhasm: r434 = r4>>>34
rorx $34,%r11,%rax
# qhasm: r439 = r4>>>39
rorx $39,%r11,%r14
# qhasm: maj2 = r5
mov %r10,%rbx
# qhasm: maj2 ^= r4
# qhasm: wc891011 = D8
vmovapd %ymm5,64(%rsp)
# qhasm: r4Sigma0 ^= r434
# qhasm: r4andr5 = r5
mov %r10,%rdi
# qhasm: r4andr5 &= r4
# qhasm: maj3 = r6
mov %r12,%rax
# qhasm: maj3 &= maj2
# qhasm: 2x 16x X12 = X12[bigendian64]
vpshufb %ymm0,%ymm4,%ymm4
# qhasm: ch2 = r1
mov %rcx,%r14
# qhasm: maj3 ^= r4andr5
# qhasm: r7Sigma1 = r7>>>14
rorx $14,%r13,%rax
# qhasm: 4x D12 = X12 + mem256[constants + 96]
vpaddq 96(%r8),%ymm4,%ymm5
# qhasm: ch2 &= r7
# qhasm: r718 = r7>>>18
rorx $18,%r13,%r15
# qhasm: r7Sigma1 ^= r718
# qhasm: r741 = r7>>>41
rorx $41,%r13,%r15
# qhasm: mem256[&w + 96] = X12
# qhasm: r3Sigma0 = r3>>>28
rorx $28,%r9,%rdi
# qhasm: wc12131415 = D12
vmovapd %ymm5,128(%rsp)
# qhasm: r2 += ch2
# qhasm: ch1 = r0
mov %rsi,%rax
# qhasm: r334 = r3>>>34
rorx $34,%r9,%r14
# qhasm: ch1 ^= r7
# qhasm: r339 = r3>>>39
rorx $39,%r9,%r14
# qhasm: r2 += maj2
# qhasm: r6Sigma1 = r6>>>14
rorx $14,%r12,%r14
# qhasm: r618 = r6>>>18
rorx $18,%r12,%r15
# qhasm: r641 = r6>>>41
rorx $41,%r12,%rbx
# qhasm: ch1 &= r6
# qhasm: r2Sigma0 = r2>>>28
rorx $28,%rdx,%rdi
# qhasm: r6Sigma1 ^= r641
# qhasm: r234 = r2>>>34
rorx $34,%rdx,%r15
# qhasm: maj0 = r3
mov %r9,%rbx
# qhasm: maj0 ^= r2
# qhasm: r239 = r2>>>39
rorx $39,%rdx,%rax
# qhasm: r2andr3 = r3
mov %r9,%r15
# qhasm: r2andr3 &= r2
# qhasm: maj1 = r4
mov %r11,%rax
# qhasm: maj1 &= maj0
# qhasm: ch0 = r7
mov %r13,%rdi
# qhasm: maj1 ^= r2andr3
# qhasm: r5Sigma1 = r5>>>14
rorx $14,%r10,%r14
# qhasm: r1 += maj1
# qhasm: r518 = r5>>>18
rorx $18,%r10,%rax
# qhasm: r5Sigma1 ^= r518
# qhasm: r541 = r5>>>41
rorx $41,%r10,%rax
# qhasm: r5Sigma1 ^= r541
# qhasm: r1Sigma0 = r1>>>28
rorx $28,%rcx,%rax
# qhasm: r0 += ch0
# qhasm: r134 = r1>>>34
rorx $34,%rcx,%rdi
# qhasm: r1Sigma0 ^= r134
# qhasm: r139 = r1>>>39
rorx $39,%rcx,%rdi
# qhasm: r1Sigma0 ^= r139
# qhasm: i = 4
mov $4,%rdi
# qhasm: innerloop:
._innerloop:
# qhasm: X1 = mem256[&w + 8]
vmovupd 168(%rsp),%ymm5
# qhasm: 4x X1right1 = X1 unsigned>> 1
vpsrlq $1,%ymm5,%ymm6
# qhasm: r4Sigma1 = r4>>>14
rorx $14,%r11,%rax
# qhasm: r7 += wc891011[0]
# qhasm: 4x X1left63 = X1 << 63
vpsllq $63,%ymm5,%ymm7
# qhasm: ch7 = r6
mov %r12,%r14
# qhasm: ch7 ^= r5
# qhasm: r418 = r4>>>18
rorx $18,%r11,%r15
# qhasm: ch7 &= r4
# qhasm: maj6 = r1
mov %rcx,%rbx
# qhasm: maj6 ^= r0
# qhasm: W14 = mem128[&w + 112],0
vmovupd 272(%rsp),%xmm8
# qhasm: r441 = r4>>>41
rorx $41,%r11,%rbp
# qhasm: r4Sigma1 ^= r418
# qhasm: X1sigma0 = X1right1 ^ X1left63
vpxor %ymm6,%ymm7,%ymm6
# qhasm: 4x X1right8 = X1 unsigned>> 8
vpsrlq $8,%ymm5,%ymm7
# qhasm: r4Sigma1 ^= r441
# qhasm: r0Sigma0 = r0>>>28
rorx $28,%rsi,%r14
# qhasm: r034 = r0>>>34
rorx $34,%rsi,%r15
# qhasm: X1sigma0 = X1sigma0 ^ X1right8
vpxor %ymm6,%ymm7,%ymm6
# qhasm: r7 += r4Sigma1
# qhasm: maj7 = r2
mov %rdx,%rax
# qhasm: maj7 &= maj6
# qhasm: 2x,0 W14right19 = W14 unsigned>> 19
vpsrlq $19,%xmm8,%xmm7
# qhasm: r039 = r0>>>39
rorx $39,%rsi,%r15
# qhasm: 4x X1left56 = X1 << 56
vpsllq $56,%ymm5,%ymm9
# qhasm: r3 += r7
# qhasm: r0andr1 = r1
mov %rcx,%r15
# qhasm: r0andr1 &= r0
# qhasm: 2x,0 W14left45 = W14 << 45
vpsllq $45,%xmm8,%xmm10
# qhasm: r7 += r0Sigma0
# qhasm: ch6 = r5
mov %r10,%r14
# qhasm: ch6 ^= r4
# qhasm: 2x,0 W14right61 = W14 unsigned>> 61
vpsrlq $61,%xmm8,%xmm11
# qhasm: r3Sigma1 = r3>>>14
rorx $14,%r9,%rbp
# qhasm: X1sigma0 = X1sigma0 ^ X1left56
vpxor %ymm6,%ymm9,%ymm6
# qhasm: r7 += maj7
# qhasm: r318 = r3>>>18
rorx $18,%r9,%rax
# qhasm: 4x X1right7 = X1 unsigned>> 7
vpsrlq $7,%ymm5,%ymm5
# qhasm: 1x,0 W14sigma1 = W14right19 ^ W14left45
vpxor %xmm7,%xmm10,%xmm7
# qhasm: ch6 &= r3
# qhasm: r341 = r3>>>41
rorx $41,%r9,%rax
# qhasm: maj6 &= r7
# qhasm: X1sigma0 = X1sigma0 ^ X1right7
vpxor %ymm6,%ymm5,%ymm5
# qhasm: 1x,0 W14sigma1 ^= W14right61
# qhasm: 4x X0 = X0 + X1sigma0
vpaddq %ymm1,%ymm5,%ymm1
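# The vectorized message schedule computes little-sigma0 on four message
# words at once: each 64-bit rotate is synthesized from a shift pair
# (right 1 / left 63, right 8 / left 56), plus a plain shift right by 7.
# Scalar equivalent in C (helper name as defined earlier):
#
#   #define sigma0(x) (ror64((x), 1) ^ ror64((x), 8) ^ ((x) >> 7))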
# qhasm: r3Sigma1 ^= r341
# qhasm: 2x,0 W14left3 = W14 << 3
vpsllq $3,%xmm8,%xmm5
# qhasm: r7Sigma0 = r7>>>28
rorx $28,%r13,%rax
# qhasm: ch6 ^= r5
# qhasm: 4x X0 = X0 + mem256[&w + 72]
vpaddq 232(%rsp),%ymm1,%ymm1
# qhasm: r734 = r7>>>34
rorx $34,%r13,%r15
# qhasm: r5 += wc891011[2]
# qhasm: r739 = r7>>>39
rorx $39,%r13,%r14
# qhasm: r2 += r6
# qhasm: 2x,0 W14right6 = W14 unsigned>> 6
vpsrlq $6,%xmm8,%xmm5
# qhasm: r7Sigma0 ^= r739
# qhasm: ch5 = r4
mov %r11,%r14
# qhasm: ch5 ^= r3
# qhasm: r2Sigma1 = r2>>>14
rorx $14,%rdx,%rax
# qhasm: 1x,0 W14sigma1 ^= W14right6
# qhasm: r218 = r2>>>18
rorx $18,%rdx,%r15
# qhasm: r241 = r2>>>41
rorx $41,%rdx,%rbx
# qhasm: 4x X0 = W14sigma1 + X0
vpaddq %ymm7,%ymm1,%ymm1
# qhasm: ch5 ^= r4
# qhasm: 2x,0 W0right19 = X0 unsigned>> 19
vpsrlq $19,%xmm1,%xmm5
# qhasm: r6Sigma0 = r6>>>28
rorx $28,%r12,%r15
# qhasm: r5 += ch5
# qhasm: 2x,0 W0left45 = X0 << 45
vpsllq $45,%xmm1,%xmm6
# qhasm: r634 = r6>>>34
rorx $34,%r12,%r14
# qhasm: r2Sigma1 ^= r241
# qhasm: maj4 = r7
mov %r13,%rbx
# qhasm: X5 = mem256[&w + 40]
vmovupd 200(%rsp),%ymm7
# qhasm: maj4 ^= r6
# qhasm: 2x,0 W0right61 = X0 unsigned>> 61
vpsrlq $61,%xmm1,%xmm8
# qhasm: 1x,0 W0sigma1 = W0right19 ^ W0left45
vpxor %xmm5,%xmm6,%xmm5
# qhasm: r6Sigma0 ^= r634
# qhasm: r639 = r6>>>39
rorx $39,%r12,%r14
# qhasm: 2x,0 W0left3 = X0 << 3
vpsllq $3,%xmm1,%xmm6
# qhasm: r6Sigma0 ^= r639
# qhasm: 2x,0 W0right6 = X0 unsigned>> 6
vpsrlq $6,%xmm1,%xmm8
# qhasm: 1x,0 W0sigma1 ^= W0left3
# qhasm: r6andr7 = r7
mov %r13,%rax
# qhasm: r6andr7 &= r6
# qhasm: r1Sigma1 = r1>>>14
rorx $14,%rcx,%r14
# qhasm: r5 += r6Sigma0
# qhasm: maj5 = r0
mov %rsi,%r15
# qhasm: maj5 &= maj4
vpermq $0x4e,%ymm5,%ymm5
# qhasm: maj5 ^= r6andr7
# qhasm: ch4 = r3
mov %r9,%rbp
# qhasm: 4x X5right1 = X5 unsigned>> 1
vpsrlq $1,%ymm7,%ymm6
# qhasm: ch4 ^= r2
# qhasm: r118 = r1>>>18
rorx $18,%rcx,%r15
# qhasm: ch4 &= r1
# qhasm: r141 = r1>>>41
rorx $41,%rcx,%r15
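# little-sigma1 (rotates 19 and 61, shift 6) is applied the same way, but
# only two lanes at a time (the "2x,0"/"1x,0" operations on xmm
# registers): within a 4-word batch, W[t+2] and W[t+3] need sigma1 of the
# just-computed W[t] and W[t+1], so the second pair is produced after the
# vpermq $0x4e lane swap. Scalar equivalent in C (helper name mine):
#
#   #define sigma1(x) (ror64((x), 19) ^ ror64((x), 61) ^ ((x) >> 6))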
# qhasm: 4x X0 = X0 + W0sigma1
vpaddq %ymm1,%ymm5,%ymm1
# qhasm: r4 += ch4
# qhasm: r5Sigma0 = r5>>>28
rorx $28,%r10,%rax
# qhasm: 4x D0 = X0 + mem256[constants + 128]
vpaddq 128(%r8),%ymm1,%ymm5
# qhasm: r1Sigma1 ^= r141
# qhasm: r534 = r5>>>34
rorx $34,%r10,%r15
# qhasm: mem256[&w + 128] = X0
# qhasm: r539 = r5>>>39
rorx $39,%r10,%r14
# qhasm: r3 += wc12131415[0]
# qhasm: wc0123 = D0
vmovapd %ymm5,64(%rsp)
# qhasm: W2 = mem128[&w + 16],0
vmovupd 176(%rsp),%xmm5
# qhasm: r4 += r5Sigma0
# qhasm: r0Sigma1 = r0>>>14
rorx $14,%rsi,%rax
# qhasm: ch3 = r2
mov %rdx,%r14
# qhasm: ch3 ^= r1
# qhasm: 4x X5left63 = X5 << 63
vpsllq $63,%ymm7,%ymm8
# qhasm: r018 = r0>>>18
rorx $18,%rsi,%r15
# qhasm: maj2 = r5
mov %r10,%rbx
# qhasm: ch3 &= r0
# qhasm: r041 = r0>>>41
rorx $41,%rsi,%rbp
# qhasm: X5sigma0 = X5right1 ^ X5left63
vpxor %ymm6,%ymm8,%ymm6
# qhasm: r0Sigma1 ^= r018
# qhasm: 4x X5right8 = X5 unsigned>> 8
vpsrlq $8,%ymm7,%ymm8
# qhasm: r4Sigma0 = r4>>>28
rorx $28,%r11,%r15
# qhasm: r0Sigma1 ^= r041
# qhasm: r434 = r4>>>34
rorx $34,%r11,%r14
# qhasm: X5sigma0 = X5sigma0 ^ X5right8
vpxor %ymm6,%ymm8,%ymm6
# qhasm: r3 += r0Sigma1
# qhasm: maj3 = r6
mov %r12,%rax
# qhasm: maj3 &= maj2
# qhasm: r439 = r4>>>39
rorx $39,%r11,%r14
# qhasm: 2x,0 W2right19 = W2 unsigned>> 19
vpsrlq $19,%xmm5,%xmm8
# qhasm: 4x X5left56 = X5 << 56
vpsllq $56,%ymm7,%ymm9
# qhasm: r7 += r3
# qhasm: r4andr5 = r5
mov %r10,%r14
# qhasm: r4andr5 &= r4
# qhasm: 2x,0 W2left45 = W2 << 45
vpsllq $45,%xmm5,%xmm10
# qhasm: r3 += r4Sigma0
# qhasm: ch2 = r1
mov %rcx,%r15
# qhasm: ch2 ^= r0
# qhasm: 2x,0 W2right61 = W2 unsigned>> 61
vpsrlq $61,%xmm5,%xmm11
# qhasm: r7Sigma1 = r7>>>14
rorx $14,%r13,%rbp
# qhasm: X5sigma0 = X5sigma0 ^ X5left56
vpxor %ymm6,%ymm9,%ymm6
# qhasm: r3 += maj3
# qhasm: 4x X5right7 = X5 unsigned>> 7
vpsrlq $7,%ymm7,%ymm7
# qhasm: r718 = r7>>>18
rorx $18,%r13,%rax
# qhasm: 1x,0 W2sigma1 = W2right19 ^ W2left45
vpxor %xmm8,%xmm10,%xmm8
# qhasm: ch2 &= r7
# qhasm: X5sigma0 = X5sigma0 ^ X5right7
vpxor %ymm6,%ymm7,%ymm6
# qhasm: r741 = r7>>>41
rorx $41,%r13,%rax
# qhasm: maj2 &= r3
# qhasm: 4x X4 = X4 + X5sigma0
vpaddq %ymm2,%ymm6,%ymm2
# qhasm: r7Sigma1 ^= r741
# qhasm: 2x,0 W2left3 = W2 << 3
vpsllq $3,%xmm5,%xmm6
# qhasm: r3Sigma0 = r3>>>28
rorx $28,%r9,%rax
# qhasm: ch2 ^= r1
# qhasm: r334 = r3>>>34
rorx $34,%r9,%r14
# qhasm: 4x X4 = X4 + mem256[&w + 104]
vpaddq 264(%rsp),%ymm2,%ymm2
# qhasm: r1 += wc12131415[2]
# qhasm: r339 = r3>>>39
rorx $39,%r9,%r14
# qhasm: r6 += r2
# qhasm: 2x,0 W2right6 = W2 unsigned>> 6
vpsrlq $6,%xmm5,%xmm5
# qhasm: r3Sigma0 ^= r339
# qhasm: ch1 = r0
mov %rsi,%r14
# qhasm: ch1 ^= r7
# qhasm: r6Sigma1 = r6>>>14
rorx $14,%r12,%rax
# qhasm: 1x,0 W2sigma1 ^= W2right6
# qhasm: r618 = r6>>>18
rorx $18,%r12,%r15
# qhasm: r641 = r6>>>41
rorx $41,%r12,%rbx
# qhasm: 4x X4 = W2sigma1 + X4
vpaddq %ymm8,%ymm2,%ymm2
# qhasm: ch1 ^= r0
# qhasm: 2x,0 W4right19 = X4 unsigned>> 19
vpsrlq $19,%xmm2,%xmm5
# qhasm: r1 += ch1
# qhasm: r2Sigma0 = r2>>>28
rorx $28,%rdx,%r14
# qhasm: 2x,0 W4left45 = X4 << 45
vpsllq $45,%xmm2,%xmm6
# qhasm: r6Sigma1 ^= r641
# qhasm: r234 = r2>>>34
rorx $34,%rdx,%r15
# qhasm: maj0 = r3
mov %r9,%rbx
# qhasm: maj0 ^= r2
# qhasm: 2x,0 W4right61 = X4 unsigned>> 61
vpsrlq $61,%xmm2,%xmm7
# qhasm: X9 = mem256[&w + 72]
vmovupd 232(%rsp),%ymm8
# qhasm: r2Sigma0 ^= r234
# qhasm: 1x,0 W4sigma1 = W4right19 ^ W4left45
vpxor %xmm5,%xmm6,%xmm5
# qhasm: r239 = r2>>>39
rorx $39,%rdx,%r15
# qhasm: 2x,0 W4left3 = X4 << 3
vpsllq $3,%xmm2,%xmm6
# qhasm: 1x,0 W4sigma1 ^= W4right61
# qhasm: 2x,0 W4right6 = X4 unsigned>> 6
vpsrlq $6,%xmm2,%xmm7
# qhasm: r2Sigma0 ^= r239
# qhasm: r2andr3 = r3
mov %r9,%rax
# qhasm: r5Sigma1 = r5>>>14
rorx $14,%r10,%r15
# qhasm: r2andr3 &= r2
# qhasm: maj1 = r4
mov %r11,%r14
# qhasm: maj1 &= maj0
vpermq $0x4e,%ymm5,%ymm5
# qhasm: maj1 ^= r2andr3
# qhasm: ch0 = r7
mov %r13,%rbp
# qhasm: 4x X9right1 = X9 unsigned>> 1
vpsrlq $1,%ymm8,%ymm6
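# Each 4-word batch follows the SHA-512 schedule recurrence: starting
# from the old W[t-16] block held in w, the code adds sigma0(W[t-15]),
# then the W[t-7] term (the mem256[&w + ...] add), and finally
# sigma1(W[t-2]). In scalar C terms:
#
#   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16];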
# qhasm: ch0 ^= r6
# qhasm: r518 = r5>>>18
rorx $18,%r10,%r14
# qhasm: maj0 &= r1
# qhasm: 4x X4 = X4 + W4sigma1
vpaddq %ymm2,%ymm5,%ymm2
# qhasm: r541 = r5>>>41
rorx $41,%r10,%r14
# qhasm: mem256[&w + 32] = X4
# qhasm: r1Sigma0 = r1>>>28
rorx $28,%rcx,%rax
# qhasm: 4x D4 = X4 + mem256[constants + 32]
vpaddq 32(%r8),%ymm2,%ymm5
# qhasm: wc4567 = D4
vmovapd %ymm5,96(%rsp)
# qhasm: r5Sigma1 ^= r541
# qhasm: r134 = r1>>>34
rorx $34,%rcx,%r14
# qhasm: r0 += r5Sigma1
# qhasm: r139 = r1>>>39
rorx $39,%rcx,%r14
# qhasm: r4 += r0
# qhasm: r4Sigma1 = r4>>>14
rorx $14,%r11,%r14
# qhasm: W6 = mem128[&w + 48],0
vmovupd 208(%rsp),%xmm5
# qhasm: r0 += r1Sigma0
# qhasm: ch7 = r6
mov %r12,%rax
# qhasm: ch7 ^= r5
# qhasm: r418 = r4>>>18
rorx $18,%r11,%r15
# qhasm: 4x X9left63 = X9 << 63
vpsllq $63,%ymm8,%ymm7
# qhasm: ch7 &= r4
# qhasm: maj6 = r1
mov %rcx,%rbx
# qhasm: maj6 ^= r0
# qhasm: r441 = r4>>>41
rorx $41,%r11,%rbp
# qhasm: X9sigma0 = X9right1 ^ X9left63
vpxor %ymm6,%ymm7,%ymm6
# qhasm: r4Sigma1 ^= r418
# qhasm: 4x X9right8 = X9 unsigned>> 8
vpsrlq $8,%ymm8,%ymm7
# qhasm: ch7 ^= r6
# qhasm: r0Sigma0 = r0>>>28
rorx $28,%rsi,%r15
# qhasm: r7 += ch7
# qhasm: X9sigma0 = X9sigma0 ^ X9right8
vpxor %ymm6,%ymm7,%ymm6
# qhasm: r7 += r4Sigma1
# qhasm: r034 = r0>>>34
rorx $34,%rsi,%rax
# qhasm: maj7 = r2
mov %rdx,%r14
# qhasm: maj7 &= maj6
# qhasm: 2x,0 W6right19 = W6 unsigned>> 19
vpsrlq $19,%xmm5,%xmm7
# qhasm: r039 = r0>>>39
rorx $39,%rsi,%rax
# qhasm: r3 += r7
# qhasm: 4x X9left56 = X9 << 56
vpsllq $56,%ymm8,%ymm9
# qhasm: r6 += wc0123[1]
# qhasm: r0andr1 = r1
mov %rcx,%rax
# qhasm: r0andr1 &= r0
# qhasm: ch6 = r5
mov %r10,%r15
# qhasm: 2x,0 W6left45 = W6 << 45
vpsllq $45,%xmm5,%xmm10
# qhasm: maj7 ^= r0andr1
# qhasm: 2x,0 W6right61 = W6 unsigned>> 61
vpsrlq $61,%xmm5,%xmm11
# qhasm: r3Sigma1 = r3>>>14
rorx $14,%r9,%rbp
# qhasm: r7 += maj7
# qhasm: X9sigma0 = X9sigma0 ^ X9left56
vpxor %ymm6,%ymm9,%ymm6
# qhasm: 4x X9right7 = X9 unsigned>> 7
vpsrlq $7,%ymm8,%ymm8
# qhasm: r318 = r3>>>18
rorx $18,%r9,%r14
# qhasm: 1x,0 W6sigma1 = W6right19 ^ W6left45
vpxor %xmm7,%xmm10,%xmm7
# qhasm: ch6 &= r3
# qhasm: r341 = r3>>>41
rorx $41,%r9,%r14
# qhasm: X9sigma0 = X9sigma0 ^ X9right7
vpxor %ymm6,%ymm8,%ymm6
# qhasm: maj6 &= r7
# qhasm: 4x X8 = X8 + X9sigma0
vpaddq %ymm3,%ymm6,%ymm3
# qhasm: r3Sigma1 ^= r341
# qhasm: 2x,0 W6left3 = W6 << 3
vpsllq $3,%xmm5,%xmm6
# qhasm: r7Sigma0 = r7>>>28
rorx $28,%r13,%rax
# qhasm: ch6 ^= r5
# qhasm: 4x X8 = X8 + mem256[&w + 8]
vpaddq 168(%rsp),%ymm3,%ymm3
# qhasm: r734 = r7>>>34
rorx $34,%r13,%r14
# qhasm: r5 += wc0123[2]
# qhasm: r739 = r7>>>39
rorx $39,%r13,%r14
# qhasm: r2 += r6
# qhasm: ch5 = r4
mov %r11,%r14
# qhasm: 2x,0 W6right6 = W6 unsigned>> 6
vpsrlq $6,%xmm5,%xmm5
# qhasm: r6 += maj6
# qhasm: r2Sigma1 = r2>>>14
rorx $14,%rdx,%rax
# qhasm: 1x,0 W6sigma1 ^= W6right6
# qhasm: r218 = r2>>>18
rorx $18,%rdx,%r15
# qhasm: r241 = r2>>>41
rorx $41,%rdx,%rbx
# qhasm: 4x X8 = W6sigma1 + X8
vpaddq %ymm7,%ymm3,%ymm3
# qhasm: 2x,0 W8right19 = X8 unsigned>> 19
vpsrlq $19,%xmm3,%xmm5
# qhasm: ch5 ^= r4
# qhasm: r6Sigma0 = r6>>>28
rorx $28,%r12,%r15
# qhasm: r5 += ch5
# qhasm: 2x,0 W8left45 = X8 << 45
vpsllq $45,%xmm3,%xmm6
# qhasm: r634 = r6>>>34
rorx $34,%r12,%r14
# qhasm: r2Sigma1 ^= r241
# qhasm: maj4 = r7
mov %r13,%rbx
# qhasm: 2x,0 W8right61 = X8 unsigned>> 61
vpsrlq $61,%xmm3,%xmm7
# qhasm: maj4 ^= r6
# qhasm: 1x,0 W8sigma1 = W8right19 ^ W8left45
vpxor %xmm5,%xmm6,%xmm5
# qhasm: r639 = r6>>>39
rorx $39,%r12,%r14
# qhasm: 1x,0 W8sigma1 ^= W8right61
# qhasm: 2x,0 W8left3 = X8 << 3
vpsllq $3,%xmm3,%xmm6
# qhasm: r6Sigma0 ^= r639
# qhasm: 2x,0 W8right6 = X8 unsigned>> 6
vpsrlq $6,%xmm3,%xmm7
# qhasm: 1x,0 W8sigma1 ^= W8left3
# qhasm: r6andr7 = r7
mov %r13,%rax
# qhasm: r6andr7 &= r6
# qhasm: r1Sigma1 = r1>>>14
rorx $14,%rcx,%r14
# qhasm: 1x,0 W8sigma1 ^= W8right6
# qhasm: maj5 = r0
mov %rsi,%r15
# qhasm: maj5 &= maj4
vpermq $0x4e,%ymm5,%ymm5
# qhasm: maj5 ^= r6andr7
# qhasm: ch4 = r3
mov %r9,%rbp
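# As soon as a batch W[t..t+3] is finished, vpaddq folds in the round
# constants and the sums are spilled to the stack (wc0123, wc4567, ...),
# overlapping the vector schedule with the scalar rounds; each round then
# only has to add one ready-made word. Sketch of the idea in C:
#
#   wc[j] = W[j] + K[j];                        /* vector, 4 at a time */
#   T1 = h + Sigma1(e) + Ch(e, f, g) + wc[j];   /* scalar round */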
# qhasm: ch4 ^= r2
# qhasm: X13 = mem256[&w + 104]
vmovupd 264(%rsp),%ymm6
# qhasm: 4x X13right1 = X13 unsigned>> 1
vpsrlq $1,%ymm6,%ymm7
# qhasm: r5 += maj5
# qhasm: r118 = r1>>>18
rorx $18,%rcx,%r15
# qhasm: ch4 &= r1
# qhasm: r141 = r1>>>41
rorx $41,%rcx,%r15
# qhasm: 4x X8 = X8 + W8sigma1
vpaddq %ymm3,%ymm5,%ymm3
# qhasm: mem256[&w + 64] = X8
# qhasm: r5Sigma0 = r5>>>28
rorx $28,%r10,%rax
# qhasm: 4x D8 = X8 + mem256[constants + 64]
vpaddq 64(%r8),%ymm3,%ymm5
# qhasm: wc891011 = D8
vmovapd %ymm5,64(%rsp)
# qhasm: r534 = r5>>>34
rorx $34,%r10,%rbp
# qhasm: r1Sigma1 ^= r141
# qhasm: r539 = r5>>>39
rorx $39,%r10,%r14
# qhasm: r3 += wc4567[0]
# qhasm: W10 = mem128[&w + 80],0
vmovupd 240(%rsp),%xmm5
# qhasm: r5Sigma0 ^= r539
# qhasm: r0Sigma1 = r0>>>14
rorx $14,%rsi,%r14
# qhasm: r4 += r5Sigma0
# qhasm: ch3 = r2
mov %rdx,%rax
# qhasm: ch3 ^= r1
# qhasm: 4x X13left63 = X13 << 63
vpsllq $63,%ymm6,%ymm8
# qhasm: r018 = r0>>>18
rorx $18,%rsi,%r15
# qhasm: maj2 = r5
mov %r10,%rbx
# qhasm: maj2 ^= r4
# qhasm: X13sigma0 = X13right1 ^ X13left63
vpxor %ymm7,%ymm8,%ymm7
# qhasm: r041 = r0>>>41
rorx $41,%rsi,%rbp
# qhasm: r0Sigma1 ^= r018
# qhasm: 4x X13right8 = X13 unsigned>> 8
vpsrlq $8,%ymm6,%ymm8
# qhasm: r4Sigma0 = r4>>>28
rorx $28,%r11,%r15
# qhasm: r0Sigma1 ^= r041
# qhasm: r434 = r4>>>34
rorx $34,%r11,%rax
# qhasm: X13sigma0 = X13sigma0 ^ X13right8
vpxor %ymm7,%ymm8,%ymm7
# qhasm: r3 += r0Sigma1
# qhasm: maj3 = r6
mov %r12,%r14
# qhasm: maj3 &= maj2
# qhasm: 2x,0 W10right19 = W10 unsigned>> 19
vpsrlq $19,%xmm5,%xmm8
# qhasm: r4Sigma0 ^= r434
# qhasm: r439 = r4>>>39
rorx $39,%r11,%rax
# qhasm: 4x X13left56 = X13 << 56
vpsllq $56,%ymm6,%ymm9
# qhasm: r4Sigma0 ^= r439
# qhasm: r4andr5 = r5
mov %r10,%rax
# qhasm: 2x,0 W10left45 = W10 << 45
vpsllq $45,%xmm5,%xmm10
# qhasm: r4andr5 &= r4
# qhasm: ch2 = r1
mov %rcx,%r15
# qhasm: 2x,0 W10right61 = W10 unsigned>> 61
vpsrlq $61,%xmm5,%xmm11
# qhasm: ch2 ^= r0
# qhasm: X13sigma0 = X13sigma0 ^ X13left56
vpxor %ymm7,%ymm9,%ymm7
# qhasm: r7Sigma1 = r7>>>14
rorx $14,%r13,%rbp
# qhasm: 4x X13right7 = X13 unsigned>> 7
vpsrlq $7,%ymm6,%ymm6
# qhasm: r3 += maj3
# qhasm: r718 = r7>>>18
rorx $18,%r13,%r14
# qhasm: 1x,0 W10sigma1 = W10right19 ^ W10left45
vpxor %xmm8,%xmm10,%xmm8
# qhasm: ch2 &= r7
# qhasm: r741 = r7>>>41
rorx $41,%r13,%r14
# qhasm: X13sigma0 = X13sigma0 ^ X13right7
vpxor %ymm7,%ymm6,%ymm6
# qhasm: maj2 &= r3
# qhasm: 4x X12 = X12 + X13sigma0
vpaddq %ymm4,%ymm6,%ymm4
# qhasm: r7Sigma1 ^= r741
# qhasm: 2x,0 W10left3 = W10 << 3
vpsllq $3,%xmm5,%xmm6
# qhasm: r3Sigma0 = r3>>>28
rorx $28,%r9,%rax
# qhasm: ch2 ^= r1
# qhasm: r334 = r3>>>34
rorx $34,%r9,%r14
# qhasm: 4x X12 = X12 + mem256[&w + 40]
vpaddq 200(%rsp),%ymm4,%ymm4
# qhasm: r1 += wc4567[2]
# qhasm: r339 = r3>>>39
rorx $39,%r9,%r14
# qhasm: 2x,0 W10right6 = W10 unsigned>> 6
vpsrlq $6,%xmm5,%xmm5
# qhasm: r6 += r2
# qhasm: ch1 = r0
mov %rsi,%r14
# qhasm: r2 += maj2
# qhasm: r6Sigma1 = r6>>>14
rorx $14,%r12,%rax
# qhasm: 1x,0 W10sigma1 ^= W10right6
# qhasm: r618 = r6>>>18
rorx $18,%r12,%r15
# qhasm: ch1 ^= r0
# qhasm: 4x X12 = W10sigma1 + X12
vpaddq %ymm8,%ymm4,%ymm4
# qhasm: r641 = r6>>>41
rorx $41,%r12,%rbx
# qhasm: 2x,0 W12right19 = X12 unsigned>> 19
vpsrlq $19,%xmm4,%xmm5
# qhasm: r6Sigma1 ^= r618
# qhasm: r2Sigma0 = r2>>>28
rorx $28,%rdx,%r14
# qhasm: 2x,0 W12left45 = X12 << 45
vpsllq $45,%xmm4,%xmm6
# qhasm: r6Sigma1 ^= r641
# qhasm: r234 = r2>>>34
rorx $34,%rdx,%r15
# qhasm: maj0 = r3
mov %r9,%rbx
# qhasm: maj0 ^= r2
# qhasm: 2x,0 W12right61 = X12 unsigned>> 61
vpsrlq $61,%xmm4,%xmm7
# qhasm: r239 = r2>>>39
rorx $39,%rdx,%r15
# qhasm: 1x,0 W12sigma1 = W12right19 ^ W12left45
vpxor %xmm5,%xmm6,%xmm5
# qhasm: 2x,0 W12left3 = X12 << 3
vpsllq $3,%xmm4,%xmm6
# qhasm: 1x,0 W12sigma1 ^= W12right61
# qhasm: 2x,0 W12right6 = X12 unsigned>> 6
vpsrlq $6,%xmm4,%xmm7
# qhasm: r5 += r1
# qhasm: r2andr3 = r3
mov %r9,%rax
# qhasm: r2andr3 &= r2
# qhasm: r5Sigma1 = r5>>>14
rorx $14,%r10,%r15
# qhasm: r1 += r2Sigma0
# qhasm: maj1 = r4
mov %r11,%r14
# qhasm: maj1 &= maj0
vpermq $0x4e,%ymm5,%ymm5
# qhasm: maj1 ^= r2andr3
# qhasm: ch0 = r7
mov %r13,%rbp
# qhasm: ch0 ^= r6
# qhasm: r518 = r5>>>18
rorx $18,%r10,%r14
# qhasm: maj0 &= r1
# qhasm: r541 = r5>>>41
rorx $41,%r10,%r14
# qhasm: 4x X12 = X12 + W12sigma1
vpaddq %ymm4,%ymm5,%ymm4
# qhasm: r0 += ch0
# qhasm: r1Sigma0 = r1>>>28
rorx $28,%rcx,%rax
# qhasm: 4x D12 = X12 + mem256[constants + 96]
vpaddq 96(%r8),%ymm4,%ymm5
# qhasm: wc12131415 = D12
vmovapd %ymm5,128(%rsp)
# qhasm: r5Sigma1 ^= r541
# qhasm: r134 = r1>>>34
rorx $34,%rcx,%r14
# qhasm: r0 += r5Sigma1
# qhasm: r139 = r1>>>39
rorx $39,%rcx,%r14
# qhasm: r4 += r0
# qhasm: r4Sigma1 = r4>>>14
rorx $14,%r11,%rdi
# qhasm: ch7 = r6
mov %r12,%r8
# qhasm: r418 = r4>>>18
rorx $18,%r11,%rax
# qhasm: ch7 ^= r5
# qhasm: r441 = r4>>>41
rorx $41,%r11,%r14
# qhasm: r4Sigma1 ^= r418
# qhasm: r0Sigma0 = r0>>>28
rorx $28,%rsi,%rax
# qhasm: r4Sigma1 ^= r441
# qhasm: r034 = r0>>>34
rorx $34,%rsi,%r14
# qhasm: maj6 = r1
mov %rcx,%r15
# qhasm: maj6 ^= r0
# qhasm: r0andr1 = r1
mov %rcx,%r8
# qhasm: r039 = r0>>>39
rorx $39,%rsi,%r14
# qhasm: r0andr1 &= r0
# qhasm: maj7 = r2
mov %rdx,%rdi
# qhasm: r6 += wc891011[1]
# qhasm: ch6 = r5
mov %r10,%rax
# qhasm: maj7 ^= r0andr1
# qhasm: r3Sigma1 = r3>>>14
rorx $14,%r9,%r14
# qhasm: r7 += maj7
# qhasm: r318 = r3>>>18
rorx $18,%r9,%rdi
# qhasm: r3Sigma1 ^= r318
# qhasm: r341 = r3>>>41
rorx $41,%r9,%rdi
# qhasm: r3Sigma1 ^= r341
# qhasm: r7Sigma0 = r7>>>28
rorx $28,%r13,%rdi
# qhasm: maj6 ^= r0andr1
# qhasm: r734 = r7>>>34
rorx $34,%r13,%r8
# qhasm: r7Sigma0 ^= r734
# qhasm: r739 = r7>>>39
rorx $39,%r13,%r8
# qhasm: r7Sigma0 ^= r739
# qhasm: in = in_stack
movq 400(%rsp),%rdi
# qhasm: r5 += wc891011[2]
# qhasm: r2Sigma1 = r2>>>14
rorx $14,%rdx,%r8
# qhasm: ch5 = r4
mov %r11,%rax
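# One round in this style updates only two working variables; instead of
# rotating a..h through registers, the code renames them, so each round
# writes d and h directly (r3 += r7 above is d += T1, and r7 then
# accumulates T2). The inner loop runs i = 4 times over 16 rounds each,
# covering rounds 16..79. C sketch of a single round:
#
#   T1 = h + Sigma1(e) + Ch(e, f, g) + wc[j];
#   T2 = Sigma0(a) + Maj(a, b, c);
#   d += T1;
#   h  = T1 + T2;   /* a..h shift by renaming, not by moving */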
# qhasm: r218 = r2>>>18
rorx $18,%rdx,%r14
# qhasm: ch5 ^= r3
# qhasm: r241 = r2>>>41
rorx $41,%rdx,%r15
# qhasm: r2Sigma1 ^= r218
# qhasm: r6Sigma0 = r6>>>28
rorx $28,%r12,%r14
# qhasm: r2Sigma1 ^= r241
# qhasm: r634 = r6>>>34
rorx $34,%r12,%r15
# qhasm: maj4 = r7
mov %r13,%rbx
# qhasm: maj4 ^= r6
# qhasm: r6andr7 = r7
mov %r13,%rax
# qhasm: r639 = r6>>>39
rorx $39,%r12,%r15
# qhasm: r6andr7 &= r6
# qhasm: maj5 = r0
mov %rsi,%r8
# qhasm: r4 += wc891011[3]
# qhasm: ch4 = r3
mov %r9,%r14
# qhasm: maj5 ^= r6andr7
# qhasm: r1Sigma1 = r1>>>14
rorx $14,%rcx,%r15
# qhasm: r5 += maj5
# qhasm: r118 = r1>>>18
rorx $18,%rcx,%r8
# qhasm: r1Sigma1 ^= r118
# qhasm: r141 = r1>>>41
rorx $41,%rcx,%r8
# qhasm: r1Sigma1 ^= r141
# qhasm: r5Sigma0 = r5>>>28
rorx $28,%r10,%r8
# qhasm: maj4 ^= r6andr7
# qhasm: r534 = r5>>>34
rorx $34,%r10,%rax
# qhasm: r5Sigma0 ^= r534
# qhasm: r539 = r5>>>39
rorx $39,%r10,%rax
# qhasm: r5Sigma0 ^= r539
# qhasm: inlen = inlen_stack
movq 376(%rsp),%rax
# qhasm: in += 128
# qhasm: r0Sigma1 = r0>>>14
rorx $14,%rsi,%r8
# qhasm: ch3 = r2
mov %rdx,%r14
# qhasm: r018 = r0>>>18
rorx $18,%rsi,%r15
# qhasm: ch3 ^= r1
# qhasm: r041 = r0>>>41
rorx $41,%rsi,%rbx
# qhasm: r0Sigma1 ^= r018
# qhasm: r4Sigma0 = r4>>>28
rorx $28,%r11,%r15
# qhasm: r0Sigma1 ^= r041
# qhasm: r434 = r4>>>34
rorx $34,%r11,%rbx
# qhasm: maj2 = r5
mov %r10,%rbp
# qhasm: maj2 ^= r4
# qhasm: r4andr5 = r5
mov %r10,%r14
# qhasm: r439 = r4>>>39
rorx $39,%r11,%rbx
# qhasm: r4andr5 &= r4
# qhasm: maj3 = r6
mov %r12,%r8
# qhasm: r2 += wc12131415[1]
# qhasm: ch2 = r1
mov %rcx,%r15
# qhasm: maj3 ^= r4andr5
# qhasm: r7Sigma1 = r7>>>14
rorx $14,%r13,%rbx
# qhasm: r3 += maj3
# qhasm: r718 = r7>>>18
rorx $18,%r13,%r8
# qhasm: r7Sigma1 ^= r718
# qhasm: r741 = r7>>>41
rorx $41,%r13,%r8
# qhasm: r7Sigma1 ^= r741
# qhasm: r3Sigma0 = r3>>>28
rorx $28,%r9,%r8
# qhasm: maj2 ^= r4andr5
# qhasm: r334 = r3>>>34
rorx $34,%r9,%r14
# qhasm: r3Sigma0 ^= r334
# qhasm: r339 = r3>>>39
rorx $39,%r9,%r14
# qhasm: r3Sigma0 ^= r339
# qhasm: r6Sigma1 = r6>>>14
rorx $14,%r12,%r8
# qhasm: ch1 = r0
mov %rsi,%r14
# qhasm: r618 = r6>>>18
rorx $18,%r12,%r15
# qhasm: ch1 ^= r7
# qhasm: r641 = r6>>>41
rorx $41,%r12,%rbx
# qhasm: r6Sigma1 ^= r618
# qhasm: r2Sigma0 = r2>>>28
rorx $28,%rdx,%r15
# qhasm: r6Sigma1 ^= r641
# qhasm: r234 = r2>>>34
rorx $34,%rdx,%rbx
# qhasm: maj0 = r3
mov %r9,%rbp
# qhasm: maj0 ^= r2
# qhasm: r2andr3 = r3
mov %r9,%r14
# qhasm: r239 = r2>>>39
rorx $39,%rdx,%rbx
# qhasm: r2andr3 &= r2
# qhasm: maj1 = r4
mov %r11,%r8
# qhasm: r0 += wc12131415[3]
# qhasm: ch0 = r7
mov %r13,%r15
# qhasm: maj1 ^= r2andr3
# qhasm: r5Sigma1 = r5>>>14
rorx $14,%r10,%rbx
# qhasm: r1 += maj1
# qhasm: r518 = r5>>>18
rorx $18,%r10,%r8
# qhasm: r5Sigma1 ^= r518
# qhasm: r541 = r5>>>41
rorx $41,%r10,%r8
# qhasm: r5Sigma1 ^= r541
# qhasm: r1Sigma0 = r1>>>28
rorx $28,%rcx,%r8
# qhasm: maj0 ^= r2andr3
# qhasm: r134 = r1>>>34
rorx $34,%rcx,%r14
# qhasm: r1Sigma0 ^= r134
# qhasm: r139 = r1>>>39
rorx $39,%rcx,%r14
# qhasm: r1Sigma0 ^= r139
# qhasm: inlen_stack = inlen
movq %rax,376(%rsp)
# qhasm: r7 += state4567[3]
# qhasm: constants = constants_stack
movq 392(%rsp),%r8
# qhasm: r0 += state0123[0]
# qhasm: statebytes = statebytes_stack
movq 384(%rsp),%rdi
# qhasm: X0 = state0123
vmovapd 0(%rsp),%ymm1
# qhasm: X4 = state4567
vmovapd 32(%rsp),%ymm2
# qhasm: 2x 16x X0 = X0[bigendian64]
vpshufb %ymm0,%ymm1,%ymm1
# qhasm: 2x 16x X4 = X4[bigendian64]
vpshufb %ymm0,%ymm2,%ymm0
# qhasm: mem256[statebytes+0] = X0
# qhasm: caller_r11 = r11_stack
movq 320(%rsp),%r11
# qhasm: caller_r12 = r12_stack
movq 336(%rsp),%r12
# qhasm: caller_r14 = r14_stack
movq 344(%rsp),%r14
# qhasm: caller_r13 = r13_stack
movq 328(%rsp),%r13
# qhasm: caller_r15 = r15_stack
movq 360(%rsp),%r15
# qhasm: caller_rbx = rbx_stack
movq 352(%rsp),%rbx
# qhasm: caller_rbp = rbp_stack
movq 368(%rsp),%rbp
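# End of a block: the eight working variables are added back into the
# state (the Davies-Meyer feed-forward), the state is byte-swapped back
# to big-endian storage via vpshufb, and the code appears to loop back to
# ._outerloop while at least another 128-byte block of input remains,
# before restoring the callee-saved registers. C sketch of the per-block
# wrap-up (variable names illustrative):
#
#   for (int k = 0; k < 8; k++) state[k] += r[k];
#   in += 128; inlen -= 128;   /* repeat while inlen >= 128 */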
# qhasm: return inlen
add %r11,%rsp
ret