781
643
d->XMM_D(1) = int32_to_float64(l1, &env->sse_status);
784
void OPPROTO op_cvtpi2ps(void)
646
void helper_cvtpi2ps(XMMReg *d, MMXReg *s)
786
XMMReg *d = (Reg *)((char *)env + PARAM1);
787
MMXReg *s = (MMXReg *)((char *)env + PARAM2);
788
648
d->XMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
789
649
d->XMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);
792
void OPPROTO op_cvtpi2pd(void)
652
void helper_cvtpi2pd(XMMReg *d, MMXReg *s)
794
XMMReg *d = (Reg *)((char *)env + PARAM1);
795
MMXReg *s = (MMXReg *)((char *)env + PARAM2);
796
654
d->XMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
797
655
d->XMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);
800
void OPPROTO op_cvtsi2ss(void)
658
void helper_cvtsi2ss(XMMReg *d, uint32_t val)
802
XMMReg *d = (Reg *)((char *)env + PARAM1);
803
d->XMM_S(0) = int32_to_float32(T0, &env->sse_status);
660
d->XMM_S(0) = int32_to_float32(val, &env->sse_status);
806
void OPPROTO op_cvtsi2sd(void)
663
void helper_cvtsi2sd(XMMReg *d, uint32_t val)
808
XMMReg *d = (Reg *)((char *)env + PARAM1);
809
d->XMM_D(0) = int32_to_float64(T0, &env->sse_status);
665
d->XMM_D(0) = int32_to_float64(val, &env->sse_status);
812
668
#ifdef TARGET_X86_64
813
void OPPROTO op_cvtsq2ss(void)
669
void helper_cvtsq2ss(XMMReg *d, uint64_t val)
815
XMMReg *d = (Reg *)((char *)env + PARAM1);
816
d->XMM_S(0) = int64_to_float32(T0, &env->sse_status);
671
d->XMM_S(0) = int64_to_float32(val, &env->sse_status);
819
void OPPROTO op_cvtsq2sd(void)
674
void helper_cvtsq2sd(XMMReg *d, uint64_t val)
821
XMMReg *d = (Reg *)((char *)env + PARAM1);
822
d->XMM_D(0) = int64_to_float64(T0, &env->sse_status);
676
d->XMM_D(0) = int64_to_float64(val, &env->sse_status);
826
680
/* float to integer */
827
void OPPROTO op_cvtps2dq(void)
681
void helper_cvtps2dq(XMMReg *d, XMMReg *s)
829
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
830
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
831
683
d->XMM_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status);
832
684
d->XMM_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status);
833
685
d->XMM_L(2) = float32_to_int32(s->XMM_S(2), &env->sse_status);
834
686
d->XMM_L(3) = float32_to_int32(s->XMM_S(3), &env->sse_status);
837
void OPPROTO op_cvtpd2dq(void)
689
void helper_cvtpd2dq(XMMReg *d, XMMReg *s)
839
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
840
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
841
691
d->XMM_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status);
842
692
d->XMM_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status);
846
void OPPROTO op_cvtps2pi(void)
696
void helper_cvtps2pi(MMXReg *d, XMMReg *s)
848
MMXReg *d = (MMXReg *)((char *)env + PARAM1);
849
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
850
698
d->MMX_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status);
851
699
d->MMX_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status);
854
void OPPROTO op_cvtpd2pi(void)
702
void helper_cvtpd2pi(MMXReg *d, XMMReg *s)
856
MMXReg *d = (MMXReg *)((char *)env + PARAM1);
857
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
858
704
d->MMX_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status);
859
705
d->MMX_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status);
862
void OPPROTO op_cvtss2si(void)
708
int32_t helper_cvtss2si(XMMReg *s)
864
XMMReg *s = (XMMReg *)((char *)env + PARAM1);
865
T0 = float32_to_int32(s->XMM_S(0), &env->sse_status);
710
return float32_to_int32(s->XMM_S(0), &env->sse_status);
868
void OPPROTO op_cvtsd2si(void)
713
int32_t helper_cvtsd2si(XMMReg *s)
870
XMMReg *s = (XMMReg *)((char *)env + PARAM1);
871
T0 = float64_to_int32(s->XMM_D(0), &env->sse_status);
715
return float64_to_int32(s->XMM_D(0), &env->sse_status);
874
718
#ifdef TARGET_X86_64
875
void OPPROTO op_cvtss2sq(void)
719
int64_t helper_cvtss2sq(XMMReg *s)
877
XMMReg *s = (XMMReg *)((char *)env + PARAM1);
878
T0 = float32_to_int64(s->XMM_S(0), &env->sse_status);
721
return float32_to_int64(s->XMM_S(0), &env->sse_status);
881
void OPPROTO op_cvtsd2sq(void)
724
int64_t helper_cvtsd2sq(XMMReg *s)
883
XMMReg *s = (XMMReg *)((char *)env + PARAM1);
884
T0 = float64_to_int64(s->XMM_D(0), &env->sse_status);
726
return float64_to_int64(s->XMM_D(0), &env->sse_status);
888
730
/* float to integer truncated */
889
void OPPROTO op_cvttps2dq(void)
731
void helper_cvttps2dq(XMMReg *d, XMMReg *s)
891
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
892
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
893
733
d->XMM_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
894
734
d->XMM_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status);
895
735
d->XMM_L(2) = float32_to_int32_round_to_zero(s->XMM_S(2), &env->sse_status);
896
736
d->XMM_L(3) = float32_to_int32_round_to_zero(s->XMM_S(3), &env->sse_status);
899
void OPPROTO op_cvttpd2dq(void)
739
void helper_cvttpd2dq(XMMReg *d, XMMReg *s)
901
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
902
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
903
741
d->XMM_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
904
742
d->XMM_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status);
908
void OPPROTO op_cvttps2pi(void)
746
void helper_cvttps2pi(MMXReg *d, XMMReg *s)
910
MMXReg *d = (MMXReg *)((char *)env + PARAM1);
911
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
912
748
d->MMX_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
913
749
d->MMX_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status);
916
void OPPROTO op_cvttpd2pi(void)
752
void helper_cvttpd2pi(MMXReg *d, XMMReg *s)
918
MMXReg *d = (MMXReg *)((char *)env + PARAM1);
919
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
920
754
d->MMX_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
921
755
d->MMX_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status);
924
void OPPROTO op_cvttss2si(void)
758
int32_t helper_cvttss2si(XMMReg *s)
926
XMMReg *s = (XMMReg *)((char *)env + PARAM1);
927
T0 = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
760
return float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
930
void OPPROTO op_cvttsd2si(void)
763
int32_t helper_cvttsd2si(XMMReg *s)
932
XMMReg *s = (XMMReg *)((char *)env + PARAM1);
933
T0 = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
765
return float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
936
768
#ifdef TARGET_X86_64
937
void OPPROTO op_cvttss2sq(void)
769
int64_t helper_cvttss2sq(XMMReg *s)
939
XMMReg *s = (XMMReg *)((char *)env + PARAM1);
940
T0 = float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status);
771
return float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status);
943
void OPPROTO op_cvttsd2sq(void)
774
int64_t helper_cvttsd2sq(XMMReg *s)
945
XMMReg *s = (XMMReg *)((char *)env + PARAM1);
946
T0 = float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status);
776
return float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status);
950
void OPPROTO op_rsqrtps(void)
780
void helper_rsqrtps(XMMReg *d, XMMReg *s)
952
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
953
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
954
782
d->XMM_S(0) = approx_rsqrt(s->XMM_S(0));
955
783
d->XMM_S(1) = approx_rsqrt(s->XMM_S(1));
956
784
d->XMM_S(2) = approx_rsqrt(s->XMM_S(2));
957
785
d->XMM_S(3) = approx_rsqrt(s->XMM_S(3));
960
void OPPROTO op_rsqrtss(void)
788
void helper_rsqrtss(XMMReg *d, XMMReg *s)
962
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
963
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
964
790
d->XMM_S(0) = approx_rsqrt(s->XMM_S(0));
967
void OPPROTO op_rcpps(void)
793
void helper_rcpps(XMMReg *d, XMMReg *s)
969
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
970
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
971
795
d->XMM_S(0) = approx_rcp(s->XMM_S(0));
972
796
d->XMM_S(1) = approx_rcp(s->XMM_S(1));
973
797
d->XMM_S(2) = approx_rcp(s->XMM_S(2));
974
798
d->XMM_S(3) = approx_rcp(s->XMM_S(3));
977
void OPPROTO op_rcpss(void)
801
void helper_rcpss(XMMReg *d, XMMReg *s)
979
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
980
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
981
803
d->XMM_S(0) = approx_rcp(s->XMM_S(0));
984
void OPPROTO op_haddps(void)
806
void helper_haddps(XMMReg *d, XMMReg *s)
986
XMMReg *d = (XMMReg *)((char *)env + PARAM1);
987
XMMReg *s = (XMMReg *)((char *)env + PARAM2);
989
809
r.XMM_S(0) = d->XMM_S(0) + d->XMM_S(1);
990
810
r.XMM_S(1) = d->XMM_S(2) + d->XMM_S(3);
1134
/* 3DNow! float ops */
1136
void helper_pi2fd(MMXReg *d, MMXReg *s)
1138
d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status);
1139
d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status);
1142
void helper_pi2fw(MMXReg *d, MMXReg *s)
1144
d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status);
1145
d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status);
1148
void helper_pf2id(MMXReg *d, MMXReg *s)
1150
d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status);
1151
d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status);
1154
void helper_pf2iw(MMXReg *d, MMXReg *s)
1156
d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status));
1157
d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status));
1160
void helper_pfacc(MMXReg *d, MMXReg *s)
1163
r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
1164
r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
1168
void helper_pfadd(MMXReg *d, MMXReg *s)
1170
d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
1171
d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
1174
void helper_pfcmpeq(MMXReg *d, MMXReg *s)
1176
d->MMX_L(0) = float32_eq(d->MMX_S(0), s->MMX_S(0), &env->mmx_status) ? -1 : 0;
1177
d->MMX_L(1) = float32_eq(d->MMX_S(1), s->MMX_S(1), &env->mmx_status) ? -1 : 0;
1180
void helper_pfcmpge(MMXReg *d, MMXReg *s)
1182
d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0;
1183
d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0;
1186
void helper_pfcmpgt(MMXReg *d, MMXReg *s)
1188
d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0;
1189
d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0;
1192
void helper_pfmax(MMXReg *d, MMXReg *s)
1194
if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status))
1195
d->MMX_S(0) = s->MMX_S(0);
1196
if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status))
1197
d->MMX_S(1) = s->MMX_S(1);
1200
void helper_pfmin(MMXReg *d, MMXReg *s)
1202
if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status))
1203
d->MMX_S(0) = s->MMX_S(0);
1204
if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status))
1205
d->MMX_S(1) = s->MMX_S(1);
1208
void helper_pfmul(MMXReg *d, MMXReg *s)
1210
d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
1211
d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
1214
void helper_pfnacc(MMXReg *d, MMXReg *s)
1217
r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
1218
r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
1222
void helper_pfpnacc(MMXReg *d, MMXReg *s)
1225
r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
1226
r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
1230
void helper_pfrcp(MMXReg *d, MMXReg *s)
1232
d->MMX_S(0) = approx_rcp(s->MMX_S(0));
1233
d->MMX_S(1) = d->MMX_S(0);
1236
void helper_pfrsqrt(MMXReg *d, MMXReg *s)
1238
d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff;
1239
d->MMX_S(1) = approx_rsqrt(d->MMX_S(1));
1240
d->MMX_L(1) |= s->MMX_L(0) & 0x80000000;
1241
d->MMX_L(0) = d->MMX_L(1);
1244
void helper_pfsub(MMXReg *d, MMXReg *s)
1246
d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
1247
d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
1250
void helper_pfsubr(MMXReg *d, MMXReg *s)
1252
d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status);
1253
d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status);
1256
void helper_pswapd(MMXReg *d, MMXReg *s)
1259
r.MMX_L(0) = s->MMX_L(1);
1260
r.MMX_L(1) = s->MMX_L(0);
1265
/* SSSE3 op helpers */
1266
void glue(helper_pshufb, SUFFIX) (Reg *d, Reg *s)
1271
for (i = 0; i < (8 << SHIFT); i++)
1272
r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1)));
1277
void glue(helper_phaddw, SUFFIX) (Reg *d, Reg *s)
{
    /* PHADDW: horizontal add of adjacent 16-bit pairs;
       low half from d, high half from s. */
    d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
    d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
    XMM_ONLY(d->W(2) = (int16_t)d->W(4) + (int16_t)d->W(5));
    XMM_ONLY(d->W(3) = (int16_t)d->W(6) + (int16_t)d->W(7));
    d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1);
    d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3);
    XMM_ONLY(d->W(6) = (int16_t)s->W(4) + (int16_t)s->W(5));
    XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));
}

void glue(helper_phaddd, SUFFIX) (Reg *d, Reg *s)
{
    /* PHADDD: horizontal add of adjacent 32-bit pairs. */
    d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
    XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
    d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1);
    XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));
}

void glue(helper_phaddsw, SUFFIX) (Reg *d, Reg *s)
{
    /* PHADDSW: horizontal add with signed 16-bit saturation. */
    d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
    d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
    XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5)));
    XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7)));
    d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1));
    d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3));
    XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5)));
    XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
}
1309
void glue(helper_pmaddubsw, SUFFIX) (Reg *d, Reg *s)
{
    /* PMADDUBSW: multiply unsigned bytes of d by signed bytes of s,
       add adjacent products, saturate to signed 16-bit. */
    d->W(0) = satsw((int8_t)s->B( 0) * (uint8_t)d->B( 0) +
                    (int8_t)s->B( 1) * (uint8_t)d->B( 1));
    d->W(1) = satsw((int8_t)s->B( 2) * (uint8_t)d->B( 2) +
                    (int8_t)s->B( 3) * (uint8_t)d->B( 3));
    d->W(2) = satsw((int8_t)s->B( 4) * (uint8_t)d->B( 4) +
                    (int8_t)s->B( 5) * (uint8_t)d->B( 5));
    d->W(3) = satsw((int8_t)s->B( 6) * (uint8_t)d->B( 6) +
                    (int8_t)s->B( 7) * (uint8_t)d->B( 7));
#if SHIFT == 1
    /* upper half exists only for the XMM variant */
    d->W(4) = satsw((int8_t)s->B( 8) * (uint8_t)d->B( 8) +
                    (int8_t)s->B( 9) * (uint8_t)d->B( 9));
    d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) +
                    (int8_t)s->B(11) * (uint8_t)d->B(11));
    d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) +
                    (int8_t)s->B(13) * (uint8_t)d->B(13));
    d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) +
                    (int8_t)s->B(15) * (uint8_t)d->B(15));
#endif
}
1331
void glue(helper_phsubw, SUFFIX) (Reg *d, Reg *s)
{
    /* PHSUBW: horizontal subtract of adjacent 16-bit pairs. */
    d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1);
    d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3);
    XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5));
    XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7));
    d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1);
    d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3);
    XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5));
    XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7));
}

void glue(helper_phsubd, SUFFIX) (Reg *d, Reg *s)
{
    /* PHSUBD: horizontal subtract of adjacent 32-bit pairs. */
    d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1);
    XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3));
    d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1);
    XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3));
}

void glue(helper_phsubsw, SUFFIX) (Reg *d, Reg *s)
{
    /* PHSUBSW: horizontal subtract with signed 16-bit saturation. */
    d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1));
    d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3));
    XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5)));
    XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7)));
    d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1));
    d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3));
    XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) - (int16_t)s->W(5)));
    XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7)));
}
1363
#define FABSB(_, x) x > INT8_MAX ? -(int8_t ) x : x
1364
#define FABSW(_, x) x > INT16_MAX ? -(int16_t) x : x
1365
#define FABSL(_, x) x > INT32_MAX ? -(int32_t) x : x
1366
SSE_HELPER_B(helper_pabsb, FABSB)
1367
SSE_HELPER_W(helper_pabsw, FABSW)
1368
SSE_HELPER_L(helper_pabsd, FABSL)
1370
#define FMULHRSW(d, s) ((int16_t) d * (int16_t) s + 0x4000) >> 15
1371
SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)
1373
#define FSIGNB(d, s) s <= INT8_MAX ? s ? d : 0 : -(int8_t ) d
1374
#define FSIGNW(d, s) s <= INT16_MAX ? s ? d : 0 : -(int16_t) d
1375
#define FSIGNL(d, s) s <= INT32_MAX ? s ? d : 0 : -(int32_t) d
1376
SSE_HELPER_B(helper_psignb, FSIGNB)
1377
SSE_HELPER_W(helper_psignw, FSIGNW)
1378
SSE_HELPER_L(helper_psignd, FSIGNL)
1380
void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift)
{
    /* PALIGNR: concatenate d:s and extract a byte-aligned window
       starting 'shift' bytes into s. */
    Reg r;

    /* XXX could be checked during translation */
    if (shift >= (16 << SHIFT)) {
        /* shift larger than both operands: result is all zeroes */
        r.Q(0) = 0;
        XMM_ONLY(r.Q(1) = 0);
    } else {
        shift <<= 3;    /* bytes -> bits */
        /* funnel shift built from 64-bit pieces; out-of-range
           amounts contribute nothing */
#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
#if SHIFT == 0
        r.Q(0) = SHR(s->Q(0), shift -  0) |
                 SHR(d->Q(0), shift - 64);
#else
        r.Q(0) = SHR(s->Q(0), shift -   0) |
                 SHR(s->Q(1), shift -  64) |
                 SHR(d->Q(0), shift - 128) |
                 SHR(d->Q(1), shift - 192);
        r.Q(1) = SHR(s->Q(0), shift + 64) |
                 SHR(s->Q(1), shift -  0) |
                 SHR(d->Q(0), shift - 64) |
                 SHR(d->Q(1), shift - 128);
#endif
#undef SHR
    }

    *d = r;
}
1410
#define XMM0 env->xmm_regs[0]

/* Element-wise ternary op taking the implicit XMM0 operand as the
   per-element selector (used by the SSE4.1 blendv family). */
#define SSE_HELPER_V(name, elem, num, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
{\
    d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0));\
    d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1));\
    if (num > 2) {\
        d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2));\
        d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3));\
        if (num > 4) {\
            d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4));\
            d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5));\
            d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6));\
            d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7));\
            if (num > 8) {\
                d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8));\
                d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9));\
                d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10));\
                d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11));\
                d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12));\
                d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13));\
                d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14));\
                d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15));\
            }\
        }\
    }\
}
1440
/* Element-wise ternary op selecting per element by one bit of an
   8/16-bit immediate (used by the SSE4.1 blend family). */
#define SSE_HELPER_I(name, elem, num, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s, uint32_t imm)\
{\
    d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1));\
    d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1));\
    if (num > 2) {\
        d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1));\
        d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1));\
        if (num > 4) {\
            d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1));\
            d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1));\
            d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1));\
            d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1));\
            if (num > 8) {\
                d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1));\
                d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1));\
                d->elem(10) = F(d->elem(10), s->elem(10), ((imm >> 10) & 1));\
                d->elem(11) = F(d->elem(11), s->elem(11), ((imm >> 11) & 1));\
                d->elem(12) = F(d->elem(12), s->elem(12), ((imm >> 12) & 1));\
                d->elem(13) = F(d->elem(13), s->elem(13), ((imm >> 13) & 1));\
                d->elem(14) = F(d->elem(14), s->elem(14), ((imm >> 14) & 1));\
                d->elem(15) = F(d->elem(15), s->elem(15), ((imm >> 15) & 1));\
            }\
        }\
    }\
}
1467
/* SSE4.1 op helpers */
1468
#define FBLENDVB(d, s, m) (m & 0x80) ? s : d
1469
#define FBLENDVPS(d, s, m) (m & 0x80000000) ? s : d
1470
#define FBLENDVPD(d, s, m) (m & 0x8000000000000000LL) ? s : d
1471
SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB)
1472
SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS)
1473
SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD)
1475
void glue(helper_ptest, SUFFIX) (Reg *d, Reg *s)
1477
uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1));
1478
uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1));
1480
CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
1483
#define SSE_HELPER_F(name, elem, num, F)\
1484
void glue(name, SUFFIX) (Reg *d, Reg *s)\
1500
SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B)
1501
SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B)
1502
SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B)
1503
SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W)
1504
SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W)
1505
SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L)
1506
SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B)
1507
SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B)
1508
SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B)
1509
SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W)
1510
SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W)
1511
SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L)
1513
void glue(helper_pmuldq, SUFFIX) (Reg *d, Reg *s)
1515
d->Q(0) = (int64_t) (int32_t) d->L(0) * (int32_t) s->L(0);
1516
d->Q(1) = (int64_t) (int32_t) d->L(2) * (int32_t) s->L(2);
1519
#define FCMPEQQ(d, s) d == s ? -1 : 0
1520
SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ)
1522
void glue(helper_packusdw, SUFFIX) (Reg *d, Reg *s)
1524
d->W(0) = satuw((int32_t) d->L(0));
1525
d->W(1) = satuw((int32_t) d->L(1));
1526
d->W(2) = satuw((int32_t) d->L(2));
1527
d->W(3) = satuw((int32_t) d->L(3));
1528
d->W(4) = satuw((int32_t) s->L(0));
1529
d->W(5) = satuw((int32_t) s->L(1));
1530
d->W(6) = satuw((int32_t) s->L(2));
1531
d->W(7) = satuw((int32_t) s->L(3));
1534
#define FMINSB(d, s) MIN((int8_t) d, (int8_t) s)
1535
#define FMINSD(d, s) MIN((int32_t) d, (int32_t) s)
1536
#define FMAXSB(d, s) MAX((int8_t) d, (int8_t) s)
1537
#define FMAXSD(d, s) MAX((int32_t) d, (int32_t) s)
1538
SSE_HELPER_B(helper_pminsb, FMINSB)
1539
SSE_HELPER_L(helper_pminsd, FMINSD)
1540
SSE_HELPER_W(helper_pminuw, MIN)
1541
SSE_HELPER_L(helper_pminud, MIN)
1542
SSE_HELPER_B(helper_pmaxsb, FMAXSB)
1543
SSE_HELPER_L(helper_pmaxsd, FMAXSD)
1544
SSE_HELPER_W(helper_pmaxuw, MAX)
1545
SSE_HELPER_L(helper_pmaxud, MAX)
1547
#define FMULLD(d, s) (int32_t) d * (int32_t) s
1548
SSE_HELPER_L(helper_pmulld, FMULLD)
1550
void glue(helper_phminposuw, SUFFIX) (Reg *d, Reg *s)
{
    /* PHMINPOSUW: find the minimum unsigned word and its index;
       result word in W(0), index in W(1), rest of d zeroed. */
    int idx = 0;

    if (s->W(1) < s->W(idx))
        idx = 1;
    if (s->W(2) < s->W(idx))
        idx = 2;
    if (s->W(3) < s->W(idx))
        idx = 3;
    if (s->W(4) < s->W(idx))
        idx = 4;
    if (s->W(5) < s->W(idx))
        idx = 5;
    if (s->W(6) < s->W(idx))
        idx = 6;
    if (s->W(7) < s->W(idx))
        idx = 7;

    d->Q(1) = 0;
    d->L(1) = 0;
    d->W(1) = idx;
    d->W(0) = s->W(idx);
}
1575
void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
{
    /* ROUNDPS: round all four float32 lanes to integral values.
       mode bit 2 = use MXCSR rounding; bits 0-1 = explicit mode;
       bit 3 = suppress the inexact (precision) exception. */
    signed char prev_rounding_mode;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2)))
        switch (mode & 3) {
        case 0:
            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
            break;
        case 1:
            set_float_rounding_mode(float_round_down, &env->sse_status);
            break;
        case 2:
            set_float_rounding_mode(float_round_up, &env->sse_status);
            break;
        case 3:
            set_float_rounding_mode(float_round_to_zero, &env->sse_status);
            break;
        }

    /* bug fix: lanes are float32 — the corrupted original applied
       float64_round_to_int to 32-bit L() lanes. */
    d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status);
    d->XMM_S(1) = float32_round_to_int(s->XMM_S(1), &env->sse_status);
    d->XMM_S(2) = float32_round_to_int(s->XMM_S(2), &env->sse_status);
    d->XMM_S(3) = float32_round_to_int(s->XMM_S(3), &env->sse_status);

    if (mode & (1 << 3))
        set_float_exception_flags(
                        get_float_exception_flags(&env->sse_status) &
                        ~float_flag_inexact,
                        &env->sse_status);
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}
1611
void glue(helper_roundpd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
{
    /* ROUNDPD: round both float64 lanes; mode bits as in roundps. */
    signed char prev_rounding_mode;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2)))
        switch (mode & 3) {
        case 0:
            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
            break;
        case 1:
            set_float_rounding_mode(float_round_down, &env->sse_status);
            break;
        case 2:
            set_float_rounding_mode(float_round_up, &env->sse_status);
            break;
        case 3:
            set_float_rounding_mode(float_round_to_zero, &env->sse_status);
            break;
        }

    /* use the float64 accessors rather than raw Q() integers so the
       code stays correct with struct-typed softfloat. */
    d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status);
    d->XMM_D(1) = float64_round_to_int(s->XMM_D(1), &env->sse_status);

    if (mode & (1 << 3))
        set_float_exception_flags(
                        get_float_exception_flags(&env->sse_status) &
                        ~float_flag_inexact,
                        &env->sse_status);
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}
1645
void glue(helper_roundss, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
{
    /* ROUNDSS: round only the low float32 lane; mode bits as in
       roundps. */
    signed char prev_rounding_mode;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2)))
        switch (mode & 3) {
        case 0:
            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
            break;
        case 1:
            set_float_rounding_mode(float_round_down, &env->sse_status);
            break;
        case 2:
            set_float_rounding_mode(float_round_up, &env->sse_status);
            break;
        case 3:
            set_float_rounding_mode(float_round_to_zero, &env->sse_status);
            break;
        }

    /* bug fix: the lane is float32 — the corrupted original applied
       float64_round_to_int to a 32-bit L() lane. */
    d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status);

    if (mode & (1 << 3))
        set_float_exception_flags(
                        get_float_exception_flags(&env->sse_status) &
                        ~float_flag_inexact,
                        &env->sse_status);
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}
1678
void glue(helper_roundsd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
{
    /* ROUNDSD: round only the low float64 lane; mode bits as in
       roundps. */
    signed char prev_rounding_mode;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2)))
        switch (mode & 3) {
        case 0:
            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
            break;
        case 1:
            set_float_rounding_mode(float_round_down, &env->sse_status);
            break;
        case 2:
            set_float_rounding_mode(float_round_up, &env->sse_status);
            break;
        case 3:
            set_float_rounding_mode(float_round_to_zero, &env->sse_status);
            break;
        }

    /* use the float64 accessor rather than the raw Q() integer. */
    d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status);

    if (mode & (1 << 3))
        set_float_exception_flags(
                        get_float_exception_flags(&env->sse_status) &
                        ~float_flag_inexact,
                        &env->sse_status);
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}
1711
#define FBLENDP(d, s, m) m ? s : d
1712
SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
1713
SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
1714
SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
1716
void glue(helper_dpps, SUFFIX) (Reg *d, Reg *s, uint32_t mask)
{
    /* DPPS: dot product of the float32 lanes selected by mask bits
       4-7, broadcast into the lanes selected by mask bits 0-3. */
    float32 iresult = 0 /*float32_zero*/;

    if (mask & (1 << 4))
        iresult = float32_add(iresult,
                float32_mul(d->L(0), s->L(0), &env->sse_status),
                &env->sse_status);
    if (mask & (1 << 5))
        iresult = float32_add(iresult,
                float32_mul(d->L(1), s->L(1), &env->sse_status),
                &env->sse_status);
    if (mask & (1 << 6))
        iresult = float32_add(iresult,
                float32_mul(d->L(2), s->L(2), &env->sse_status),
                &env->sse_status);
    if (mask & (1 << 7))
        iresult = float32_add(iresult,
                float32_mul(d->L(3), s->L(3), &env->sse_status),
                &env->sse_status);
    d->L(0) = (mask & (1 << 0)) ? iresult : 0 /*float32_zero*/;
    d->L(1) = (mask & (1 << 1)) ? iresult : 0 /*float32_zero*/;
    d->L(2) = (mask & (1 << 2)) ? iresult : 0 /*float32_zero*/;
    d->L(3) = (mask & (1 << 3)) ? iresult : 0 /*float32_zero*/;
}
1742
void glue(helper_dppd, SUFFIX) (Reg *d, Reg *s, uint32_t mask)
{
    /* DPPD: dot product of the float64 lanes selected by mask bits
       4-5, broadcast into the lanes selected by mask bits 0-1. */
    float64 iresult = 0 /*float64_zero*/;

    if (mask & (1 << 4))
        iresult = float64_add(iresult,
                float64_mul(d->Q(0), s->Q(0), &env->sse_status),
                &env->sse_status);
    if (mask & (1 << 5))
        iresult = float64_add(iresult,
                float64_mul(d->Q(1), s->Q(1), &env->sse_status),
                &env->sse_status);
    d->Q(0) = (mask & (1 << 0)) ? iresult : 0 /*float64_zero*/;
    d->Q(1) = (mask & (1 << 1)) ? iresult : 0 /*float64_zero*/;
}
1758
void glue(helper_mpsadbw, SUFFIX) (Reg *d, Reg *s, uint32_t offset)
{
    /* MPSADBW: eight SADs of a 4-byte block of s (selected by
       offset bits 0-1) against sliding 4-byte windows of d
       (starting at offset bit 2 * 4). */
    int s0 = (offset & 3) << 2;
    int d0 = (offset & 4) << 0;
    int i;
    Reg r;

    for (i = 0; i < 8; i++, d0++) {
        r.W(i) = 0;
        r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0));
        r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1));
        r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2));
        r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3));
    }

    *d = r;
}
1776
/* SSE4.2 op helpers */
1777
/* it's unclear whether signed or unsigned */
1778
#define FCMPGTQ(d, s) d > s ? -1 : 0
1779
SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ)
1781
static inline int pcmp_elen(int reg, uint32_t ctrl)
1785
/* Presence of REX.W is indicated by a bit higher than 7 set */
1787
val = abs1((int64_t) env->regs[reg]);
1789
val = abs1((int32_t) env->regs[reg]);
1801
static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
1806
while (val < 8 && r->W(val))
1809
while (val < 16 && r->B(val))
1815
static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
1817
switch ((ctrl >> 0) & 3) {
1823
return (int8_t) r->B(i);
1826
return (int16_t) r->W(i);
1830
static inline unsigned pcmpxstrx(Reg *d, Reg *s,
1831
int8_t ctrl, int valids, int validd)
1833
unsigned int res = 0;
1836
int upper = (ctrl & 1) ? 7 : 15;
1841
CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0);
1843
switch ((ctrl >> 2) & 3) {
1845
for (j = valids; j >= 0; j--) {
1847
v = pcmp_val(s, ctrl, j);
1848
for (i = validd; i >= 0; i--)
1849
res |= (v == pcmp_val(d, ctrl, i));
1853
for (j = valids; j >= 0; j--) {
1855
v = pcmp_val(s, ctrl, j);
1856
for (i = ((validd - 1) | 1); i >= 0; i -= 2)
1857
res |= (pcmp_val(d, ctrl, i - 0) <= v &&
1858
pcmp_val(d, ctrl, i - 1) >= v);
1862
res = (2 << (upper - MAX(valids, validd))) - 1;
1863
res <<= MAX(valids, validd) - MIN(valids, validd);
1864
for (i = MIN(valids, validd); i >= 0; i--) {
1866
v = pcmp_val(s, ctrl, i);
1867
res |= (v == pcmp_val(d, ctrl, i));
1871
for (j = valids - validd; j >= 0; j--) {
1874
for (i = MIN(upper - j, validd); i >= 0; i--)
1875
res &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
1880
switch ((ctrl >> 4) & 3) {
1882
res ^= (2 << upper) - 1;
1885
res ^= (2 << valids) - 1;
1897
static inline int rffs1(unsigned int val)
1901
for (hi = sizeof(val) * 4; hi; hi /= 2)
1910
static inline int ffs1(unsigned int val)
1914
for (hi = sizeof(val) * 4; hi; hi /= 2)
1923
void glue(helper_pcmpestri, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
{
    /* PCMPESTRI: explicit-length string compare, index result in ECX.
       Operand lengths come from EDX (s) and EAX (d). */
    unsigned int res = pcmpxstrx(d, s, ctrl,
                    pcmp_elen(R_EDX, ctrl),
                    pcmp_elen(R_EAX, ctrl));

    if (res)
        /* bit 6 selects most- vs least-significant set bit */
        env->regs[R_ECX] = ((ctrl & (1 << 6)) ? rffs1 : ffs1)(res) - 1;
    else
        /* no match: 8 (words) or 16 (bytes) */
        env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
}
1935
void glue(helper_pcmpestrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
{
    /* PCMPESTRM: explicit-length string compare, mask result in d. */
    int i;
    unsigned int res = pcmpxstrx(d, s, ctrl,
                    pcmp_elen(R_EDX, ctrl),
                    pcmp_elen(R_EAX, ctrl));

    if ((ctrl >> 6) & 1) {
        /* expanded mask: one full element per result bit.
           bug fix: the corrupted original looped
           "for (i = 0; i <= 8; i--)", decrementing from 0 and
           indexing out of bounds. */
        if (ctrl & 1)
            for (i = 0; i < 8; i++, res >>= 1)
                d->W(i) = (res & 1) ? ~0 : 0;
        else
            for (i = 0; i < 16; i++, res >>= 1)
                d->B(i) = (res & 1) ? ~0 : 0;
    } else {
        /* bit mask in the low quadword, upper bits cleared */
        d->Q(1) = 0;
        d->Q(0) = res;
    }
}
1955
void glue(helper_pcmpistri, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
{
    /* PCMPISTRI: implicit-length (NUL-terminated) string compare,
       index result in ECX. */
    unsigned int res = pcmpxstrx(d, s, ctrl,
                    pcmp_ilen(s, ctrl),
                    pcmp_ilen(d, ctrl));

    if (res)
        env->regs[R_ECX] = ((ctrl & (1 << 6)) ? rffs1 : ffs1)(res) - 1;
    else
        env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
}
1967
void glue(helper_pcmpistrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
{
    /* PCMPISTRM: implicit-length string compare, mask result in d. */
    int i;
    unsigned int res = pcmpxstrx(d, s, ctrl,
                    pcmp_ilen(s, ctrl),
                    pcmp_ilen(d, ctrl));

    if ((ctrl >> 6) & 1) {
        /* bug fix: same out-of-bounds decrementing loops as in
           pcmpestrm — corrected to ascending bounded loops. */
        if (ctrl & 1)
            for (i = 0; i < 8; i++, res >>= 1)
                d->W(i) = (res & 1) ? ~0 : 0;
        else
            for (i = 0; i < 16; i++, res >>= 1)
                d->B(i) = (res & 1) ? ~0 : 0;
    } else {
        d->Q(1) = 0;
        d->Q(0) = res;
    }
}
1987
#define CRCPOLY 0x1edc6f41
1988
#define CRCPOLY_BITREV 0x82f63b78
1989
target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len)
1991
target_ulong crc = (msg & ((target_ulong) -1 >>
1992
(TARGET_LONG_BITS - len))) ^ crc1;
1995
crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0);
2000
#define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1))
2001
#define POPCOUNT(n, i) (n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i))
2002
target_ulong helper_popcnt(target_ulong n, uint32_t type)
2004
CC_SRC = n ? 0 : CC_Z;
2014
#ifndef TARGET_X86_64
2020
return POPCOUNT(n, 5);
1387
2026
#undef XMM_ONLY