// InstEmitSimdArithmetic32.cs

using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;
using System;

using static ARMeilleure.Instructions.InstEmitFlowHelper;
using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper32;
using static ARMeilleure.IntermediateRepresentation.OperandHelper;

namespace ARMeilleure.Instructions
{
    static partial class InstEmit32
    {
        public static void Vabs_S(ArmEmitterContext context)
        {
            OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpSimd32(context, (m) =>
                {
                    return EmitFloatAbs(context, m, (op.Size & 1) == 0, false);
                });
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, MathF.Abs, Math.Abs, op1));
            }
        }

        public static void Vabs_V(ArmEmitterContext context)
        {
            OpCode32Simd op = (OpCode32Simd)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorUnaryOpSimd32(context, (m) =>
                    {
                        return EmitFloatAbs(context, m, (op.Size & 1) == 0, true);
                    });
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, MathF.Abs, Math.Abs, op1));
                }
            }
            else
            {
                EmitVectorUnaryOpSx32(context, (op1) => EmitAbs(context, op1));
            }
        }
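
        // Integer absolute value. Like the non-saturating ARM VABS, negating the most
        // negative value wraps: Abs(int.MinValue) stays int.MinValue (VQABS is the saturating form).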
        private static Operand EmitAbs(ArmEmitterContext context, Operand value)
        {
            Operand isPositive = context.ICompareGreaterOrEqual(value, Const(value.Type, 0));

            return context.ConditionalSelect(isPositive, value, context.Negate(value));
        }

        public static void Vadd_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Addss, Intrinsic.X86Addsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => EmitSoftFloatCall(context, SoftFloat32.FPAdd, SoftFloat64.FPAdd, op1, op2));
            }
        }

        public static void Vadd_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Addps, Intrinsic.X86Addpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPAddFpscr, SoftFloat64.FPAddFpscr, op1, op2));
            }
        }

        public static void Vadd_I(ArmEmitterContext context)
        {
            if (Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PaddInstruction[op.Size], op1, op2));
            }
            else
            {
                EmitVectorBinaryOpZx32(context, (op1, op2) => context.Add(op1, op2));
            }
        }

        public static void Vdup(ArmEmitterContext context)
        {
            OpCode32SimdDupGP op = (OpCode32SimdDupGP)context.CurrOp;

            Operand insert = GetIntA32(context, op.Rt);

            // Zero extend into an I64, then replicate. Saves the most time over elementwise inserts.
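            // For example, with Size == 0: 0xAB * 0x0101010101010101 = 0xABABABABABABABAB.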
            switch (op.Size)
            {
                case 2:
                    insert = context.Multiply(context.ZeroExtend32(OperandType.I64, insert), Const(0x0000000100000001u));
                    break;
                case 1:
                    insert = context.Multiply(context.ZeroExtend16(OperandType.I64, insert), Const(0x0001000100010001u));
                    break;
                case 0:
                    insert = context.Multiply(context.ZeroExtend8(OperandType.I64, insert), Const(0x0101010101010101u));
                    break;
                default:
                    throw new InvalidOperationException("Unknown Vdup Size.");
            }

            InsertScalar(context, op.Vd, insert);
            if (op.Q)
            {
                InsertScalar(context, op.Vd + 1, insert);
            }
        }
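
        // Vm names a doubleword register here: Vm >> 1 is the containing quadword register,
        // and (Vm & 1) << (3 - Size) offsets the element index into its upper half.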
        public static void Vdup_1(ArmEmitterContext context)
        {
            OpCode32SimdDupElem op = (OpCode32SimdDupElem)context.CurrOp;

            Operand insert = EmitVectorExtractZx32(context, op.Vm >> 1, ((op.Vm & 1) << (3 - op.Size)) + op.Index, op.Size);

            // Zero extend into an I64, then replicate. Saves the most time over elementwise inserts.
            switch (op.Size)
            {
                case 2:
                    insert = context.Multiply(context.ZeroExtend32(OperandType.I64, insert), Const(0x0000000100000001u));
                    break;
                case 1:
                    insert = context.Multiply(context.ZeroExtend16(OperandType.I64, insert), Const(0x0001000100010001u));
                    break;
                case 0:
                    insert = context.Multiply(context.ZeroExtend8(OperandType.I64, insert), Const(0x0101010101010101u));
                    break;
                default:
                    throw new InvalidOperationException("Unknown Vdup Size.");
            }

            InsertScalar(context, op.Vd, insert);
            if (op.Q)
            {
                InsertScalar(context, op.Vd | 1, insert);
            }
        }
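
        // Builds the two quadwords of a 16-byte PSHUFB control mask: bytes in
        // [start, start + length) select consecutive source bytes starting at startByte,
        // and 0x80 (top bit set) zeroes the corresponding destination byte.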
        private static (long, long) MaskHelperByteSequence(int start, int length, int startByte)
        {
            int end = start + length;
            int b = startByte;
            long result = 0;
            long result2 = 0;
            for (int i = 0; i < 8; i++)
            {
                result |= (long)((i >= end || i < start) ? 0x80 : b++) << (i * 8);
            }
            for (int i = 8; i < 16; i++)
            {
                result2 |= (long)((i >= end || i < start) ? 0x80 : b++) << ((i - 8) * 8);
            }
            return (result2, result);
        }
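
        // VEXT extracts a contiguous block of bytes from the register pair (Vn, Vm),
        // starting <imm> bytes into Vn and continuing into Vm.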
        public static void Vext(ArmEmitterContext context)
        {
            OpCode32SimdExt op = (OpCode32SimdExt)context.CurrOp;
            int elems = op.GetBytesCount();
            int byteOff = op.Immediate;

            if (Optimizations.UseSsse3)
            {
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    // Writing low to high of d: start <imm> into n, overlap into m.
                    // Then rotate n down by <imm>, m up by (elems)-imm.
                    // Then OR them together for the result.

                    (long nMaskHigh, long nMaskLow) = MaskHelperByteSequence(0, elems - byteOff, byteOff);
                    (long mMaskHigh, long mMaskLow) = MaskHelperByteSequence(elems - byteOff, byteOff, 0);
                    Operand nMask, mMask;
                    if (!op.Q)
                    {
                        // Do the same operation to the bytes in the top doubleword too, as our target could be in either.
                        nMaskHigh = nMaskLow + 0x0808080808080808L;
                        mMaskHigh = mMaskLow + 0x0808080808080808L;
                    }
                    nMask = X86GetElements(context, nMaskHigh, nMaskLow);
                    mMask = X86GetElements(context, mMaskHigh, mMaskLow);
                    Operand nPart = context.AddIntrinsic(Intrinsic.X86Pshufb, n, nMask);
                    Operand mPart = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mMask);

                    return context.AddIntrinsic(Intrinsic.X86Por, nPart, mPart);
                });
            }
            else
            {
                Operand res = GetVecA32(op.Qd);

                for (int index = 0; index < elems; index++)
                {
                    Operand extract;
                    if (byteOff >= elems)
                    {
                        extract = EmitVectorExtractZx32(context, op.Qm, op.Im + (byteOff - elems), op.Size);
                    }
                    else
                    {
                        extract = EmitVectorExtractZx32(context, op.Qn, op.In + byteOff, op.Size);
                    }
                    byteOff++;

                    res = EmitVectorInsert(context, res, extract, op.Id + index, op.Size);
                }

                context.Copy(GetVecA32(op.Qd), res);
            }
        }

        public static void Vmov_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpF32(context, 0, 0);
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => op1);
            }
        }

        public static void Vmovn(ArmEmitterContext context)
        {
            EmitVectorUnaryNarrowOp32(context, (op1) => op1);
        }
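
        // Floating-point negation only has to flip the sign bit, so XOR with -0f/-0d
        // (a mask whose only set bit is the sign bit).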
        public static void Vneg_S(ArmEmitterContext context)
        {
            OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;

            if (Optimizations.UseSse2)
            {
                EmitScalarUnaryOpSimd32(context, (m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, m);
                    }
                    else
                    {
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, m);
                    }
                });
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => context.Negate(op1));
            }
        }

        public static void Vnmul_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.UseSse2)
            {
                EmitScalarBinaryOpSimd32(context, (n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, res);
                    }
                    else
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, res);
                    }
                });
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Negate(context.Multiply(op1, op2)));
            }
        }
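
        // VNMLA: d = -((n * m) + d).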
        public static void Vnmla_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpSimd32(context, (d, n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Addss, d, res);
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, res);
                    }
                    else
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Addsd, d, res);
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, res);
                    }
                });
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return context.Negate(context.Add(op1, context.Multiply(op2, op3)));
                });
            }
            else
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPNegMulAdd, SoftFloat64.FPNegMulAdd, op1, op2, op3);
                });
            }
        }
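
        // VNMLS: d = (n * m) - d.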
        public static void Vnmls_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpSimd32(context, (d, n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        Operand mask = X86GetScalar(context, -0f);
                        d = context.AddIntrinsic(Intrinsic.X86Xorps, mask, d);
                        return context.AddIntrinsic(Intrinsic.X86Addss, d, res);
                    }
                    else
                    {
                        // Negate d (not the product), mirroring the single-precision path above.
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        Operand mask = X86GetScalar(context, -0d);
                        d = context.AddIntrinsic(Intrinsic.X86Xorpd, mask, d);
                        return context.AddIntrinsic(Intrinsic.X86Addsd, d, res);
                    }
                });
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return context.Add(context.Negate(op1), context.Multiply(op2, op3));
                });
            }
            else
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPNegMulSub, SoftFloat64.FPNegMulSub, op1, op2, op3);
                });
            }
        }

        public static void Vneg_V(ArmEmitterContext context)
        {
            OpCode32Simd op = (OpCode32Simd)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorUnaryOpSimd32(context, (m) =>
                    {
                        if ((op.Size & 1) == 0)
                        {
                            // Broadcast the sign-bit mask to all elements; a scalar mask would only negate lane 0.
                            Operand mask = X86GetAllElements(context, -0f);
                            return context.AddIntrinsic(Intrinsic.X86Xorps, mask, m);
                        }
                        else
                        {
                            Operand mask = X86GetAllElements(context, -0d);
                            return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, m);
                        }
                    });
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) => context.Negate(op1));
                }
            }
            else
            {
                EmitVectorUnaryOpSx32(context, (op1) => context.Negate(op1));
            }
        }

        public static void Vdiv_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Divss, Intrinsic.X86Divsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Divide(op1, op2));
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPDiv, SoftFloat64.FPDiv, op1, op2);
                });
            }
        }

        public static void Vmaxnm_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, true, true);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => EmitSoftFloatCall(context, SoftFloat32.FPMaxNum, SoftFloat64.FPMaxNum, op1, op2));
            }
        }

        public static void Vmaxnm_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, true, false);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMaxNumFpscr, SoftFloat64.FPMaxNumFpscr, op1, op2));
            }
        }

        public static void Vminnm_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, false, true);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => EmitSoftFloatCall(context, SoftFloat32.FPMinNum, SoftFloat64.FPMinNum, op1, op2));
            }
        }

        public static void Vminnm_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, false, false);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMinNumFpscr, SoftFloat64.FPMinNumFpscr, op1, op2));
            }
        }

        public static void Vmax_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Maxps, Intrinsic.X86Maxpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMaxFpscr, SoftFloat64.FPMaxFpscr, op1, op2);
                });
            }
        }

        public static void Vmax_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (op.U)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PmaxuInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpZx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareGreaterUI(op1, op2), op1, op2));
                }
            }
            else
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PmaxsInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpSx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareGreater(op1, op2), op1, op2));
                }
            }
        }

        public static void Vmin_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Minps, Intrinsic.X86Minpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMinFpscr, SoftFloat64.FPMinFpscr, op1, op2);
                });
            }
        }

        public static void Vmin_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (op.U)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PminuInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpZx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareLessUI(op1, op2), op1, op2));
                }
            }
            else
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PminsInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpSx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareLess(op1, op2), op1, op2));
                }
            }
        }

        public static void Vmul_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Multiply(op1, op2));
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPMul, SoftFloat64.FPMul, op1, op2);
                });
            }
        }

        public static void Vmul_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Multiply(op1, op2));
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMulFpscr, SoftFloat64.FPMulFpscr, op1, op2);
                });
            }
        }

        public static void Vmul_I(ArmEmitterContext context)
        {
            if ((context.CurrOp as OpCode32SimdReg).U) throw new NotImplementedException("Polynomial mode not implemented");

            EmitVectorBinaryOpSx32(context, (op1, op2) => context.Multiply(op1, op2));
        }

        public static void Vmul_1(ArmEmitterContext context)
        {
            OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorByScalarOpF32(context, (op1, op2) => context.Multiply(op1, op2));
                }
                else
                {
                    EmitVectorByScalarOpF32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMulFpscr, SoftFloat64.FPMulFpscr, op1, op2));
                }
            }
            else
            {
                EmitVectorByScalarOpI32(context, (op1, op2) => context.Multiply(op1, op2), false);
            }
        }

        public static void Vmla_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd, Intrinsic.X86Addss, Intrinsic.X86Addsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return context.Add(op1, context.Multiply(op2, op3));
                });
            }
            else
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPMulAdd, SoftFloat64.FPMulAdd, op1, op2, op3);
                });
            }
        }

        public static void Vmla_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorTernaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Addps, Intrinsic.X86Addpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)));
            }
            else
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMulAddFpscr, SoftFloat64.FPMulAddFpscr, op1, op2, op3);
                });
            }
        }

        public static void Vmla_I(ArmEmitterContext context)
        {
            EmitVectorTernaryOpZx32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)));
        }

        public static void Vmla_1(ArmEmitterContext context)
        {
            OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorsByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Addps, Intrinsic.X86Addpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)));
                }
                else
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMulAddFpscr, SoftFloat64.FPMulAddFpscr, op1, op2, op3));
                }
            }
            else
            {
                EmitVectorsByScalarOpI32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)), false);
            }
        }

        public static void Vmls_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd, Intrinsic.X86Subss, Intrinsic.X86Subsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return context.Subtract(op1, context.Multiply(op2, op3));
                });
            }
            else
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPMulSub, SoftFloat64.FPMulSub, op1, op2, op3);
                });
            }
        }

        public static void Vmls_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorTernaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Subps, Intrinsic.X86Subpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)));
            }
            else
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMulSubFpscr, SoftFloat64.FPMulSubFpscr, op1, op2, op3);
                });
            }
        }

        public static void Vmls_I(ArmEmitterContext context)
        {
            EmitVectorTernaryOpZx32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)));
        }

        public static void Vmls_1(ArmEmitterContext context)
        {
            OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorsByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Subps, Intrinsic.X86Subpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)));
                }
                else
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPMulSubFpscr, SoftFloat64.FPMulSubFpscr, op1, op2, op3));
                }
            }
            else
            {
                EmitVectorsByScalarOpI32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)), false);
            }
        }

        public static void Vpadd_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitSse2VectorPairwiseOpF32(context, Intrinsic.X86Addps);
            }
            else
            {
                EmitVectorPairwiseOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
        }

        public static void Vpadd_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (Optimizations.UseSsse3)
            {
                EmitSsse3VectorPairwiseOp32(context, X86PaddInstruction);
            }
            else
            {
                EmitVectorPairwiseOpI32(context, (op1, op2) => context.Add(op1, op2), !op.U);
            }
        }
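
        // VREV reverses the order of 8-, 16- or 32-bit elements within 16-, 32- or 64-bit
        // containers. With SSSE3 this is (mostly) a single PSHUFB byte shuffle; the fallback
        // reassembles each container with shifts and masks.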
        public static void Vrev(ArmEmitterContext context)
        {
            OpCode32SimdRev op = (OpCode32SimdRev)context.CurrOp;

            if (Optimizations.UseSsse3)
            {
                EmitVectorUnaryOpSimd32(context, (op1) =>
                {
                    Operand mask;
                    switch (op.Size)
                    {
                        case 3:
                            // Rev64
                            switch (op.Opc)
                            {
                                case 0:
                                    mask = X86GetElements(context, 0x08090a0b0c0d0e0fL, 0x0001020304050607L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 1:
                                    mask = X86GetElements(context, 0x09080b0a0d0c0f0eL, 0x0100030205040706L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 2:
                                    return context.AddIntrinsic(Intrinsic.X86Shufps, op1, op1, Const(1 | (0 << 2) | (3 << 4) | (2 << 6)));
                            }
                            break;
                        case 2:
                            // Rev32
                            switch (op.Opc)
                            {
                                case 0:
                                    mask = X86GetElements(context, 0x0c0d0e0f_08090a0bL, 0x04050607_00010203L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 1:
                                    mask = X86GetElements(context, 0x0d0c0f0e_09080b0aL, 0x05040706_01000302L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                            }
                            break;
                        case 1:
                            // Rev16
                            mask = X86GetElements(context, 0x0e0f_0c0d_0a0b_0809L, 0x_0607_0405_0203_0001L);
                            return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                    }

                    throw new InvalidOperationException("Invalid VREV Opcode + Size combo."); // Should be unreachable.
                });
            }
            else
            {
                EmitVectorUnaryOpZx32(context, (op1) =>
                {
                    switch (op.Opc)
                    {
                        case 0:
                            switch (op.Size) // Swap bytes.
                            {
                                case 1:
                                    return InstEmitAluHelper.EmitReverseBytes16_32Op(context, op1);
                                case 2:
                                case 3:
                                    return context.ByteSwap(op1);
                            }
                            break;
                        case 1:
                            switch (op.Size)
                            {
                                case 2:
                                    return context.BitwiseOr(
                                        context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffff0000)), Const(16)),
                                        context.ShiftLeft(context.BitwiseAnd(op1, Const(0x0000ffff)), Const(16)));
                                case 3:
                                    return context.BitwiseOr(
                                        context.BitwiseOr(
                                            context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffff000000000000ul)), Const(48)),
                                            context.ShiftLeft(context.BitwiseAnd(op1, Const(0x000000000000fffful)), Const(48))),
                                        context.BitwiseOr(
                                            context.ShiftRightUI(context.BitwiseAnd(op1, Const(0x0000ffff00000000ul)), Const(16)),
                                            context.ShiftLeft(context.BitwiseAnd(op1, Const(0x00000000ffff0000ul)), Const(16))));
                            }
                            break;
                        case 2:
                            // Swap upper and lower halves.
                            return context.BitwiseOr(
                                context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffffffff00000000ul)), Const(32)),
                                context.ShiftLeft(context.BitwiseAnd(op1, Const(0x00000000fffffffful)), Const(32)));
                    }

                    throw new InvalidOperationException("Invalid VREV Opcode + Size combo."); // Should be unreachable.
                });
            }
        }

        public static void Vrecpe(ArmEmitterContext context)
        {
            OpCode32SimdSqrte op = (OpCode32SimdSqrte)context.CurrOp;

            if (op.F)
            {
                int sizeF = op.Size & 1;

                if (Optimizations.FastFP && Optimizations.UseSse2 && sizeF == 0)
                {
                    EmitVectorUnaryOpF32(context, Intrinsic.X86Rcpps, 0);
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) =>
                    {
                        return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPRecipEstimateFpscr, SoftFloat64.FPRecipEstimateFpscr, op1);
                    });
                }
            }
            else
            {
                throw new NotImplementedException("Integer Vrecpe not currently implemented.");
            }
        }
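
        // VRECPS computes the Newton-Raphson step 2 - (n * m); iterating
        // x' = x * (2 - d * x) refines a VRECPE reciprocal estimate of 1/d.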
        public static void Vrecps(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                bool single = (op.Size & 1) == 0;

                // (2 - (n*m))
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    if (single)
                    {
                        Operand maskTwo = X86GetAllElements(context, 2f);
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulps, n, m);
                        return context.AddIntrinsic(Intrinsic.X86Subps, maskTwo, res);
                    }
                    else
                    {
                        Operand maskTwo = X86GetAllElements(context, 2d);
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulpd, n, m);
                        return context.AddIntrinsic(Intrinsic.X86Subpd, maskTwo, res);
                    }
                });
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPRecipStep, SoftFloat64.FPRecipStep, op1, op2);
                });
            }
        }

        public static void Vrsqrte(ArmEmitterContext context)
        {
            OpCode32SimdSqrte op = (OpCode32SimdSqrte)context.CurrOp;

            if (op.F)
            {
                int sizeF = op.Size & 1;

                if (Optimizations.FastFP && Optimizations.UseSse2 && sizeF == 0)
                {
                    EmitVectorUnaryOpF32(context, Intrinsic.X86Rsqrtps, 0);
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) =>
                    {
                        return EmitSoftFloatCallDefaultFpscr(context, SoftFloat32.FPRSqrtEstimateFpscr, SoftFloat64.FPRSqrtEstimateFpscr, op1);
                    });
                }
            }
            else
            {
                throw new NotImplementedException("Integer Vrsqrte not currently implemented.");
            }
        }
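
        // VRSQRTS computes the Newton-Raphson step (3 - (n * m)) / 2; iterating
        // x' = x * (3 - d * x * x) / 2 refines a VRSQRTE estimate of 1/sqrt(d).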
        public static void Vrsqrts(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                bool single = (op.Size & 1) == 0;

                // (3 - (n*m)) / 2
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    if (single)
                    {
                        Operand maskHalf = X86GetAllElements(context, 0.5f);
                        Operand maskThree = X86GetAllElements(context, 3f);
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulps, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Subps, maskThree, res);
                        return context.AddIntrinsic(Intrinsic.X86Mulps, maskHalf, res);
                    }
                    else
                    {
                        Operand maskHalf = X86GetAllElements(context, 0.5d);
                        Operand maskThree = X86GetAllElements(context, 3d);
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulpd, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Subpd, maskThree, res);
                        return context.AddIntrinsic(Intrinsic.X86Mulpd, maskHalf, res);
                    }
                });
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPRSqrtStep, SoftFloat64.FPRSqrtStep, op1, op2);
                });
            }
        }
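
        // VSEL: conditional select using the host condition flags. Only the EQ, GE, GT
        // and VS conditions are encodable; their inverses come from swapping the source operands.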
        public static void Vsel(ArmEmitterContext context)
        {
            OpCode32SimdSel op = (OpCode32SimdSel)context.CurrOp;

            Operand condition = null;
            switch (op.Cc)
            {
                case OpCode32SimdSelMode.Eq:
                    condition = GetCondTrue(context, Condition.Eq);
                    break;
                case OpCode32SimdSelMode.Ge:
                    condition = GetCondTrue(context, Condition.Ge);
                    break;
                case OpCode32SimdSelMode.Gt:
                    condition = GetCondTrue(context, Condition.Gt);
                    break;
                case OpCode32SimdSelMode.Vs:
                    condition = GetCondTrue(context, Condition.Vs);
                    break;
            }

            EmitScalarBinaryOpI32(context, (op1, op2) =>
            {
                return context.ConditionalSelect(condition, op1, op2);
            });
        }

        public static void Vsqrt_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpF32(context, Intrinsic.X86Sqrtss, Intrinsic.X86Sqrtsd);
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) =>
                {
                    return EmitSoftFloatCall(context, SoftFloat32.FPSqrt, SoftFloat64.FPSqrt, op1);
                });
            }
        }

        public static void Vsub_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Subss, Intrinsic.X86Subsd);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }

        public static void Vsub_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Subps, Intrinsic.X86Subpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }

        public static void Vsub_I(ArmEmitterContext context)
        {
            if (Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PsubInstruction[op.Size], op1, op2));
            }
            else
            {
                EmitVectorBinaryOpZx32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }
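
        // FMAXNM/FMINNM semantics: when exactly one operand is a quiet NaN, it is first
        // replaced with -Inf (max) or +Inf (min), so the SSE max/min below returns the
        // numeric operand instead of propagating the NaN.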
        private static void EmitSse41MaxMinNumOpF32(ArmEmitterContext context, bool isMaxNum, bool scalar)
        {
            IOpCode32Simd op = (IOpCode32Simd)context.CurrOp;

            Func<Operand, Operand, Operand> genericEmit = (n, m) =>
            {
                Operand nNum = context.Copy(n);
                Operand mNum = context.Copy(m);

                Operand nQNaNMask = InstEmit.EmitSse2VectorIsQNaNOpF(context, nNum);
                Operand mQNaNMask = InstEmit.EmitSse2VectorIsQNaNOpF(context, mNum);

                int sizeF = op.Size & 1;

                if (sizeF == 0)
                {
                    Operand negInfMask = X86GetAllElements(context, isMaxNum ? float.NegativeInfinity : float.PositiveInfinity);

                    Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnps, mQNaNMask, nQNaNMask);
                    Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnps, nQNaNMask, mQNaNMask);

                    nNum = context.AddIntrinsic(Intrinsic.X86Blendvps, nNum, negInfMask, nMask);
                    mNum = context.AddIntrinsic(Intrinsic.X86Blendvps, mNum, negInfMask, mMask);

                    return context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxps : Intrinsic.X86Minps, nNum, mNum);
                }
                else /* if (sizeF == 1) */
                {
                    Operand negInfMask = X86GetAllElements(context, isMaxNum ? double.NegativeInfinity : double.PositiveInfinity);

                    Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnpd, mQNaNMask, nQNaNMask);
                    Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnpd, nQNaNMask, mQNaNMask);

                    nNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, nNum, negInfMask, nMask);
                    mNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, mNum, negInfMask, mMask);

                    return context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxpd : Intrinsic.X86Minpd, nNum, mNum);
                }
            };

            if (scalar)
            {
                EmitScalarBinaryOpSimd32(context, genericEmit);
            }
            else
            {
                EmitVectorBinaryOpSimd32(context, genericEmit);
            }
        }
    }
}