// InstEmitSimdArithmetic32.cs

using ARMeilleure.Decoders;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Translation;
using System;
using System.Diagnostics;

using static ARMeilleure.Instructions.InstEmitFlowHelper;
using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper;
using static ARMeilleure.Instructions.InstEmitSimdHelper32;
using static ARMeilleure.IntermediateRepresentation.OperandHelper;

namespace ARMeilleure.Instructions
{
    static partial class InstEmit32
    {
        public static void Vabs_S(ArmEmitterContext context)
        {
            OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpSimd32(context, (m) =>
                {
                    return EmitFloatAbs(context, m, (op.Size & 1) == 0, false);
                });
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, nameof(Math.Abs), op1));
            }
        }

        public static void Vabs_V(ArmEmitterContext context)
        {
            OpCode32Simd op = (OpCode32Simd)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorUnaryOpSimd32(context, (m) =>
                    {
                        return EmitFloatAbs(context, m, (op.Size & 1) == 0, true);
                    });
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) => EmitUnaryMathCall(context, nameof(Math.Abs), op1));
                }
            }
            else
            {
                EmitVectorUnaryOpSx32(context, (op1) => EmitAbs(context, op1));
            }
        }
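
        // Branchless integer abs: select the value or its two's complement negation. As with the
        // non-saturating ARM VABS, the most negative value wraps and negates to itself.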
        private static Operand EmitAbs(ArmEmitterContext context, Operand value)
        {
            Operand isPositive = context.ICompareGreaterOrEqual(value, Const(value.Type, 0));

            return context.ConditionalSelect(isPositive, value, context.Negate(value));
        }

        public static void Vadd_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Addss, Intrinsic.X86Addsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => EmitSoftFloatCall(context, nameof(SoftFloat32.FPAdd), op1, op2));
            }
        }

        public static void Vadd_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Addps, Intrinsic.X86Addpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPAddFpscr), op1, op2));
            }
        }

        public static void Vadd_I(ArmEmitterContext context)
        {
            if (Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

                EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PaddInstruction[op.Size], op1, op2));
            }
            else
            {
                EmitVectorBinaryOpZx32(context, (op1, op2) => context.Add(op1, op2));
            }
        }

        public static void Vdup(ArmEmitterContext context)
        {
            OpCode32SimdDupGP op = (OpCode32SimdDupGP)context.CurrOp;

            Operand insert = GetIntA32(context, op.Rt);

            // Zero extend into an I64, then replicate. Saves the most time over elementwise inserts.
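            // The multiply broadcasts the element, e.g. for Size == 0: 0xAB * 0x0101010101010101 = 0xABABABABABABABAB.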
            insert = op.Size switch
            {
                2 => context.Multiply(context.ZeroExtend32(OperandType.I64, insert), Const(0x0000000100000001u)),
                1 => context.Multiply(context.ZeroExtend16(OperandType.I64, insert), Const(0x0001000100010001u)),
                0 => context.Multiply(context.ZeroExtend8(OperandType.I64, insert), Const(0x0101010101010101u)),
                _ => throw new InvalidOperationException($"Invalid Vdup size \"{op.Size}\".")
            };

            InsertScalar(context, op.Vd, insert);
            if (op.Q)
            {
                InsertScalar(context, op.Vd + 1, insert);
            }
        }

        public static void Vdup_1(ArmEmitterContext context)
        {
            OpCode32SimdDupElem op = (OpCode32SimdDupElem)context.CurrOp;

            Operand insert = EmitVectorExtractZx32(context, op.Vm >> 1, ((op.Vm & 1) << (3 - op.Size)) + op.Index, op.Size);

            // Zero extend into an I64, then replicate. Saves the most time over elementwise inserts.
            insert = op.Size switch
            {
                2 => context.Multiply(context.ZeroExtend32(OperandType.I64, insert), Const(0x0000000100000001u)),
                1 => context.Multiply(context.ZeroExtend16(OperandType.I64, insert), Const(0x0001000100010001u)),
                0 => context.Multiply(context.ZeroExtend8(OperandType.I64, insert), Const(0x0101010101010101u)),
                _ => throw new InvalidOperationException($"Invalid Vdup size \"{op.Size}\".")
            };

            InsertScalar(context, op.Vd, insert);
            if (op.Q)
            {
                InsertScalar(context, op.Vd | 1, insert);
            }
        }
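
        // Builds a 16-byte PSHUFB control mask as a (high, low) pair of longs. Result bytes in
        // [start, start + length) take consecutive source bytes beginning at startByte; every other
        // control byte is 0x80, which PSHUFB defines as "zero this destination byte".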
        private static (long, long) MaskHelperByteSequence(int start, int length, int startByte)
        {
            int end = start + length;
            int b = startByte;
            long result = 0;
            long result2 = 0;

            for (int i = 0; i < 8; i++)
            {
                result |= (long)((i >= end || i < start) ? 0x80 : b++) << (i * 8);
            }

            for (int i = 8; i < 16; i++)
            {
                result2 |= (long)((i >= end || i < start) ? 0x80 : b++) << ((i - 8) * 8);
            }

            return (result2, result);
        }

        public static void Vext(ArmEmitterContext context)
        {
            OpCode32SimdExt op = (OpCode32SimdExt)context.CurrOp;

            int elems = op.GetBytesCount();
            int byteOff = op.Immediate;

            if (Optimizations.UseSsse3)
            {
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    // Writing low to high of d: start <imm> into n, overlap into m.
                    // Then rotate n down by <imm>, m up by (elems)-imm.
                    // Then OR them together for the result.
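                    // Net effect, for i in [0, elems): d[i] = (i + imm < elems) ? n[i + imm] : m[(i + imm) - elems].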
                    (long nMaskHigh, long nMaskLow) = MaskHelperByteSequence(0, elems - byteOff, byteOff);
                    (long mMaskHigh, long mMaskLow) = MaskHelperByteSequence(elems - byteOff, byteOff, 0);
                    Operand nMask, mMask;
                    if (!op.Q)
                    {
                        // Do the same operation to the bytes in the top doubleword too, as our target could be in either.
                        nMaskHigh = nMaskLow + 0x0808080808080808L;
                        mMaskHigh = mMaskLow + 0x0808080808080808L;
                    }
                    nMask = X86GetElements(context, nMaskHigh, nMaskLow);
                    mMask = X86GetElements(context, mMaskHigh, mMaskLow);
                    Operand nPart = context.AddIntrinsic(Intrinsic.X86Pshufb, n, nMask);
                    Operand mPart = context.AddIntrinsic(Intrinsic.X86Pshufb, m, mMask);

                    return context.AddIntrinsic(Intrinsic.X86Por, nPart, mPart);
                });
            }
            else
            {
                Operand res = GetVecA32(op.Qd);

                for (int index = 0; index < elems; index++)
                {
                    Operand extract;
                    if (byteOff >= elems)
                    {
                        extract = EmitVectorExtractZx32(context, op.Qm, op.Im + (byteOff - elems), op.Size);
                    }
                    else
                    {
                        extract = EmitVectorExtractZx32(context, op.Qn, op.In + byteOff, op.Size);
                    }
                    byteOff++;

                    res = EmitVectorInsert(context, res, extract, op.Id + index, op.Size);
                }

                context.Copy(GetVecA32(op.Qd), res);
            }
        }

        public static void Vmov_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpF32(context, 0, 0);
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => op1);
            }
        }

        public static void Vmovn(ArmEmitterContext context)
        {
            EmitVectorUnaryNarrowOp32(context, (op1) => op1);
        }

        public static void Vneg_S(ArmEmitterContext context)
        {
            OpCode32SimdS op = (OpCode32SimdS)context.CurrOp;

            if (Optimizations.UseSse2)
            {
                EmitScalarUnaryOpSimd32(context, (m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, m);
                    }
                    else
                    {
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, m);
                    }
                });
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) => context.Negate(op1));
            }
        }
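
        // VNMUL: Vd = -(Vn * Vm). The SSE2 path flips the sign by XORing with -0 (the sign-bit mask).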
        public static void Vnmul_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.UseSse2)
            {
                EmitScalarBinaryOpSimd32(context, (n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, res);
                    }
                    else
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, res);
                    }
                });
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Negate(context.Multiply(op1, op2)));
            }
        }
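
        // VNMLA: Vd = -(Vd + (Vn * Vm)).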
        public static void Vnmla_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpSimd32(context, (d, n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Addss, d, res);
                        Operand mask = X86GetScalar(context, -0f);
                        return context.AddIntrinsic(Intrinsic.X86Xorps, mask, res);
                    }
                    else
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Addsd, d, res);
                        Operand mask = X86GetScalar(context, -0d);
                        return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, res);
                    }
                });
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return context.Negate(context.Add(op1, context.Multiply(op2, op3)));
                });
            }
            else
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPNegMulAdd), op1, op2, op3);
                });
            }
        }
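
        // VNMLS: Vd = -Vd + (Vn * Vm).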
        public static void Vnmls_S(ArmEmitterContext context)
        {
            OpCode32SimdRegS op = (OpCode32SimdRegS)context.CurrOp;

            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpSimd32(context, (d, n, m) =>
                {
                    if ((op.Size & 1) == 0)
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulss, n, m);
                        Operand mask = X86GetScalar(context, -0f);
                        d = context.AddIntrinsic(Intrinsic.X86Xorps, mask, d);
                        return context.AddIntrinsic(Intrinsic.X86Addss, d, res);
                    }
                    else
                    {
                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulsd, n, m);
                        Operand mask = X86GetScalar(context, -0d);
                        d = context.AddIntrinsic(Intrinsic.X86Xorpd, mask, d);
                        return context.AddIntrinsic(Intrinsic.X86Addsd, d, res);
                    }
                });
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return context.Add(context.Negate(op1), context.Multiply(op2, op3));
                });
            }
            else
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPNegMulSub), op1, op2, op3);
                });
            }
        }

        public static void Vneg_V(ArmEmitterContext context)
        {
            OpCode32Simd op = (OpCode32Simd)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorUnaryOpSimd32(context, (m) =>
                    {
                        if ((op.Size & 1) == 0)
                        {
                            Operand mask = X86GetAllElements(context, -0f);
                            return context.AddIntrinsic(Intrinsic.X86Xorps, mask, m);
                        }
                        else
                        {
                            Operand mask = X86GetAllElements(context, -0d);
                            return context.AddIntrinsic(Intrinsic.X86Xorpd, mask, m);
                        }
                    });
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) => context.Negate(op1));
                }
            }
            else
            {
                EmitVectorUnaryOpSx32(context, (op1) => context.Negate(op1));
            }
        }

        public static void Vdiv_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Divss, Intrinsic.X86Divsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Divide(op1, op2));
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPDiv), op1, op2);
                });
            }
        }

        public static void Vmaxnm_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, true, true);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => EmitSoftFloatCall(context, nameof(SoftFloat32.FPMaxNum), op1, op2));
            }
        }

        public static void Vmaxnm_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, true, false);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMaxNumFpscr), op1, op2));
            }
        }

        public static void Vminnm_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, false, true);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => EmitSoftFloatCall(context, nameof(SoftFloat32.FPMinNum), op1, op2));
            }
        }

        public static void Vminnm_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse41)
            {
                EmitSse41MaxMinNumOpF32(context, false, false);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMinNumFpscr), op1, op2));
            }
        }
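
        // Note for the FastFP paths below: x86 MAXPS/MINPS return the second operand when an input
        // is NaN and treat -0 and +0 as equal, which differs from ARM FPMax/FPMin; the SoftFloat
        // fallback implements the exact ARM semantics.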
        public static void Vmax_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Maxps, Intrinsic.X86Maxpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMaxFpscr), op1, op2);
                });
            }
        }

        public static void Vmax_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (op.U)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PmaxuInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpZx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareGreaterUI(op1, op2), op1, op2));
                }
            }
            else
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PmaxsInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpSx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareGreater(op1, op2), op1, op2));
                }
            }
        }

        public static void Vmin_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Minps, Intrinsic.X86Minpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMinFpscr), op1, op2);
                });
            }
        }

        public static void Vmin_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (op.U)
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PminuInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpZx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareLessUI(op1, op2), op1, op2));
                }
            }
            else
            {
                if (Optimizations.UseSse2)
                {
                    EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PminsInstruction[op.Size], op1, op2));
                }
                else
                {
                    EmitVectorBinaryOpSx32(context, (op1, op2) => context.ConditionalSelect(context.ICompareLess(op1, op2), op1, op2));
                }
            }
        }

        public static void Vmla_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd, Intrinsic.X86Addss, Intrinsic.X86Addsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return context.Add(op1, context.Multiply(op2, op3));
                });
            }
            else
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulAdd), op1, op2, op3);
                });
            }
        }

        public static void Vmla_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorTernaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Addps, Intrinsic.X86Addpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)));
            }
            else
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMulAddFpscr), op1, op2, op3);
                });
            }
        }

        public static void Vmla_I(ArmEmitterContext context)
        {
            EmitVectorTernaryOpZx32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)));
        }

        public static void Vmla_1(ArmEmitterContext context)
        {
            OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorsByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Addps, Intrinsic.X86Addpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)));
                }
                else
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMulAddFpscr), op1, op2, op3));
                }
            }
            else
            {
                EmitVectorsByScalarOpI32(context, (op1, op2, op3) => context.Add(op1, context.Multiply(op2, op3)), false);
            }
        }

        public static void Vmls_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarTernaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd, Intrinsic.X86Subss, Intrinsic.X86Subsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return context.Subtract(op1, context.Multiply(op2, op3));
                });
            }
            else
            {
                EmitScalarTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMulSub), op1, op2, op3);
                });
            }
        }

        public static void Vmls_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorTernaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Subps, Intrinsic.X86Subpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)));
            }
            else
            {
                EmitVectorTernaryOpF32(context, (op1, op2, op3) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMulSubFpscr), op1, op2, op3);
                });
            }
        }

        public static void Vmls_I(ArmEmitterContext context)
        {
            EmitVectorTernaryOpZx32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)));
        }

        public static void Vmls_1(ArmEmitterContext context)
        {
            OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorsByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd, Intrinsic.X86Subps, Intrinsic.X86Subpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)));
                }
                else
                {
                    EmitVectorsByScalarOpF32(context, (op1, op2, op3) => EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMulSubFpscr), op1, op2, op3));
                }
            }
            else
            {
                EmitVectorsByScalarOpI32(context, (op1, op2, op3) => context.Subtract(op1, context.Multiply(op2, op3)), false);
            }
        }

        public static void Vmlsl_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            EmitVectorTernaryLongOpI32(context, (opD, op1, op2) => context.Subtract(opD, context.Multiply(op1, op2)), !op.U);
        }

        public static void Vmul_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Mulss, Intrinsic.X86Mulsd);
            }
            else if (Optimizations.FastFP)
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Multiply(op1, op2));
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPMul), op1, op2);
                });
            }
        }

        public static void Vmul_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd);
            }
            else if (Optimizations.FastFP)
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Multiply(op1, op2));
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMulFpscr), op1, op2);
                });
            }
        }

        public static void Vmul_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (op.U) // This instruction is always signed, U indicates polynomial mode.
            {
                EmitVectorBinaryOpZx32(context, (op1, op2) => EmitPolynomialMultiply(context, op1, op2, 8 << op.Size));
            }
            else
            {
                EmitVectorBinaryOpSx32(context, (op1, op2) => context.Multiply(op1, op2));
            }
        }

        public static void Vmul_1(ArmEmitterContext context)
        {
            OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;

            if (op.F)
            {
                if (Optimizations.FastFP && Optimizations.UseSse2)
                {
                    EmitVectorByScalarOpF32(context, Intrinsic.X86Mulps, Intrinsic.X86Mulpd);
                }
                else if (Optimizations.FastFP)
                {
                    EmitVectorByScalarOpF32(context, (op1, op2) => context.Multiply(op1, op2));
                }
                else
                {
                    EmitVectorByScalarOpF32(context, (op1, op2) => EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPMulFpscr), op1, op2));
                }
            }
            else
            {
                EmitVectorByScalarOpI32(context, (op1, op2) => context.Multiply(op1, op2), false);
            }
        }

        public static void Vmull_1(ArmEmitterContext context)
        {
            OpCode32SimdRegElem op = (OpCode32SimdRegElem)context.CurrOp;

            EmitVectorByScalarLongOpI32(context, (op1, op2) => context.Multiply(op1, op2), !op.U);
        }

        public static void Vmull_I(ArmEmitterContext context)
        {
            OpCode32SimdRegLong op = (OpCode32SimdRegLong)context.CurrOp;

            if (op.Polynomial)
            {
                EmitVectorBinaryLongOpI32(context, (op1, op2) => EmitPolynomialMultiply(context, op1, op2, 8 << op.Size), false);
            }
            else
            {
                EmitVectorBinaryLongOpI32(context, (op1, op2) => context.Multiply(op1, op2), !op.U);
            }
        }

        public static void Vpadd_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitSse2VectorPairwiseOpF32(context, Intrinsic.X86Addps);
            }
            else
            {
                EmitVectorPairwiseOpF32(context, (op1, op2) => context.Add(op1, op2));
            }
        }

        public static void Vpadd_I(ArmEmitterContext context)
        {
            OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

            if (Optimizations.UseSsse3)
            {
                EmitSsse3VectorPairwiseOp32(context, X86PaddInstruction);
            }
            else
            {
                EmitVectorPairwiseOpI32(context, (op1, op2) => context.Add(op1, op2), !op.U);
            }
        }

        public static void Vrev(ArmEmitterContext context)
        {
            OpCode32SimdRev op = (OpCode32SimdRev)context.CurrOp;

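            // In the SSSE3 path, each byte of the X86GetElements mask constants (given as high/low
            // 64-bit halves) is the index of the source byte to place at that destination position,
            // so each case reduces to a single shuffle.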
            if (Optimizations.UseSsse3)
            {
                EmitVectorUnaryOpSimd32(context, (op1) =>
                {
                    Operand mask;
                    switch (op.Size)
                    {
                        case 3:
                            // Rev64
                            switch (op.Opc)
                            {
                                case 0:
                                    mask = X86GetElements(context, 0x08090a0b0c0d0e0fL, 0x0001020304050607L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 1:
                                    mask = X86GetElements(context, 0x09080b0a0d0c0f0eL, 0x0100030205040706L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 2:
                                    return context.AddIntrinsic(Intrinsic.X86Shufps, op1, op1, Const(1 | (0 << 2) | (3 << 4) | (2 << 6)));
                            }
                            break;
                        case 2:
                            // Rev32
                            switch (op.Opc)
                            {
                                case 0:
                                    mask = X86GetElements(context, 0x0c0d0e0f_08090a0bL, 0x04050607_00010203L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                                case 1:
                                    mask = X86GetElements(context, 0x0d0c0f0e_09080b0aL, 0x05040706_01000302L);
                                    return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                            }
                            break;
                        case 1:
                            // Rev16
                            mask = X86GetElements(context, 0x0e0f_0c0d_0a0b_0809L, 0x0607_0405_0203_0001L);
                            return context.AddIntrinsic(Intrinsic.X86Pshufb, op1, mask);
                    }

                    throw new InvalidOperationException("Invalid VREV Opcode + Size combo."); // Should be unreachable.
                });
            }
            else
            {
                EmitVectorUnaryOpZx32(context, (op1) =>
                {
                    switch (op.Opc)
                    {
                        case 0:
                            switch (op.Size) // Swap bytes.
                            {
                                case 1:
                                    return InstEmitAluHelper.EmitReverseBytes16_32Op(context, op1);
                                case 2:
                                case 3:
                                    return context.ByteSwap(op1);
                            }
                            break;
                        case 1:
                            switch (op.Size)
                            {
                                case 2:
                                    return context.BitwiseOr(context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffff0000)), Const(16)),
                                                             context.ShiftLeft(context.BitwiseAnd(op1, Const(0x0000ffff)), Const(16)));
                                case 3:
                                    return context.BitwiseOr(
                                        context.BitwiseOr(context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffff000000000000ul)), Const(48)),
                                                          context.ShiftLeft(context.BitwiseAnd(op1, Const(0x000000000000fffful)), Const(48))),
                                        context.BitwiseOr(context.ShiftRightUI(context.BitwiseAnd(op1, Const(0x0000ffff00000000ul)), Const(16)),
                                                          context.ShiftLeft(context.BitwiseAnd(op1, Const(0x00000000ffff0000ul)), Const(16))));
                            }
                            break;
                        case 2:
                            // Swap upper and lower halves.
                            return context.BitwiseOr(context.ShiftRightUI(context.BitwiseAnd(op1, Const(0xffffffff00000000ul)), Const(32)),
                                                     context.ShiftLeft(context.BitwiseAnd(op1, Const(0x00000000fffffffful)), Const(32)));
                    }

                    throw new InvalidOperationException("Invalid VREV Opcode + Size combo."); // Should be unreachable.
                });
            }
        }

        public static void Vrecpe(ArmEmitterContext context)
        {
            OpCode32SimdSqrte op = (OpCode32SimdSqrte)context.CurrOp;

            if (op.F)
            {
                int sizeF = op.Size & 1;

                if (Optimizations.FastFP && Optimizations.UseSse2 && sizeF == 0)
                {
                    EmitVectorUnaryOpF32(context, Intrinsic.X86Rcpps, 0);
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) =>
                    {
                        return EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPRecipEstimateFpscr), op1);
                    });
                }
            }
            else
            {
                throw new NotImplementedException("Integer Vrecpe not currently implemented.");
            }
        }

        public static void Vrecps(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                bool single = (op.Size & 1) == 0;

                // (2 - (n*m))
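                // This is the Newton-Raphson correction term for a reciprocal estimate: given d and
                // an estimate x of 1/d, x * (2 - d*x) is a refined estimate, so VRECPS returns 2 - n*m.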
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    if (single)
                    {
                        Operand maskTwo = X86GetAllElements(context, 2f);

                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulps, n, m);

                        return context.AddIntrinsic(Intrinsic.X86Subps, maskTwo, res);
                    }
                    else
                    {
                        Operand maskTwo = X86GetAllElements(context, 2d);

                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulpd, n, m);

                        return context.AddIntrinsic(Intrinsic.X86Subpd, maskTwo, res);
                    }
                });
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRecipStep), op1, op2);
                });
            }
        }

        public static void Vrsqrte(ArmEmitterContext context)
        {
            OpCode32SimdSqrte op = (OpCode32SimdSqrte)context.CurrOp;

            if (op.F)
            {
                int sizeF = op.Size & 1;

                if (Optimizations.FastFP && Optimizations.UseSse2 && sizeF == 0)
                {
                    EmitVectorUnaryOpF32(context, Intrinsic.X86Rsqrtps, 0);
                }
                else
                {
                    EmitVectorUnaryOpF32(context, (op1) =>
                    {
                        return EmitSoftFloatCallDefaultFpscr(context, nameof(SoftFloat32.FPRSqrtEstimateFpscr), op1);
                    });
                }
            }
            else
            {
                throw new NotImplementedException("Integer Vrsqrte not currently implemented.");
            }
        }

        public static void Vrsqrts(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;
                bool single = (op.Size & 1) == 0;

                // (3 - (n*m)) / 2
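                // Newton-Raphson correction term for a reciprocal square root estimate: given d and
                // an estimate x of 1/sqrt(d), x * (3 - d*x*x) / 2 is a refined estimate, so VRSQRTS
                // returns (3 - n*m) / 2.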
                EmitVectorBinaryOpSimd32(context, (n, m) =>
                {
                    if (single)
                    {
                        Operand maskHalf = X86GetAllElements(context, 0.5f);
                        Operand maskThree = X86GetAllElements(context, 3f);

                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulps, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Subps, maskThree, res);

                        return context.AddIntrinsic(Intrinsic.X86Mulps, maskHalf, res);
                    }
                    else
                    {
                        Operand maskHalf = X86GetAllElements(context, 0.5d);
                        Operand maskThree = X86GetAllElements(context, 3d);

                        Operand res = context.AddIntrinsic(Intrinsic.X86Mulpd, n, m);
                        res = context.AddIntrinsic(Intrinsic.X86Subpd, maskThree, res);

                        return context.AddIntrinsic(Intrinsic.X86Mulpd, maskHalf, res);
                    }
                });
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPRSqrtStep), op1, op2);
                });
            }
        }
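
        // VSEL<cond>: Vd = <cond> ? Vn : Vm, evaluated against the current AArch32 condition flags.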
        public static void Vsel(ArmEmitterContext context)
        {
            OpCode32SimdSel op = (OpCode32SimdSel)context.CurrOp;

            Operand condition = null;

            switch (op.Cc)
            {
                case OpCode32SimdSelMode.Eq:
                    condition = GetCondTrue(context, Condition.Eq);
                    break;
                case OpCode32SimdSelMode.Ge:
                    condition = GetCondTrue(context, Condition.Ge);
                    break;
                case OpCode32SimdSelMode.Gt:
                    condition = GetCondTrue(context, Condition.Gt);
                    break;
                case OpCode32SimdSelMode.Vs:
                    condition = GetCondTrue(context, Condition.Vs);
                    break;
            }

            EmitScalarBinaryOpI32(context, (op1, op2) =>
            {
                return context.ConditionalSelect(condition, op1, op2);
            });
        }

        public static void Vsqrt_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarUnaryOpF32(context, Intrinsic.X86Sqrtss, Intrinsic.X86Sqrtsd);
            }
            else
            {
                EmitScalarUnaryOpF32(context, (op1) =>
                {
                    return EmitSoftFloatCall(context, nameof(SoftFloat32.FPSqrt), op1);
                });
            }
        }

        public static void Vsub_S(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitScalarBinaryOpF32(context, Intrinsic.X86Subss, Intrinsic.X86Subsd);
            }
            else
            {
                EmitScalarBinaryOpF32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }

        public static void Vsub_V(ArmEmitterContext context)
        {
            if (Optimizations.FastFP && Optimizations.UseSse2)
            {
                EmitVectorBinaryOpF32(context, Intrinsic.X86Subps, Intrinsic.X86Subpd);
            }
            else
            {
                EmitVectorBinaryOpF32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }

        public static void Vsub_I(ArmEmitterContext context)
        {
            if (Optimizations.UseSse2)
            {
                OpCode32SimdReg op = (OpCode32SimdReg)context.CurrOp;

                EmitVectorBinaryOpSimd32(context, (op1, op2) => context.AddIntrinsic(X86PsubInstruction[op.Size], op1, op2));
            }
            else
            {
                EmitVectorBinaryOpZx32(context, (op1, op2) => context.Subtract(op1, op2));
            }
        }
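
        // FPMaxNum/FPMinNum semantics: a quiet-NaN operand loses to a numeric one. This is emulated
        // by replacing each quiet-NaN input with -Inf (for max) or +Inf (for min) before the ordinary
        // SSE max/min, so the numeric operand always wins; if both inputs are NaN, neither is
        // replaced and the usual NaN result propagates.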
        private static void EmitSse41MaxMinNumOpF32(ArmEmitterContext context, bool isMaxNum, bool scalar)
        {
            IOpCode32Simd op = (IOpCode32Simd)context.CurrOp;

            Func<Operand, Operand, Operand> genericEmit = (n, m) =>
            {
                Operand nNum = context.Copy(n);
                Operand mNum = context.Copy(m);

                Operand nQNaNMask = InstEmit.EmitSse2VectorIsQNaNOpF(context, nNum);
                Operand mQNaNMask = InstEmit.EmitSse2VectorIsQNaNOpF(context, mNum);

                int sizeF = op.Size & 1;

                if (sizeF == 0)
                {
                    Operand negInfMask = X86GetAllElements(context, isMaxNum ? float.NegativeInfinity : float.PositiveInfinity);

                    Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnps, mQNaNMask, nQNaNMask);
                    Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnps, nQNaNMask, mQNaNMask);

                    nNum = context.AddIntrinsic(Intrinsic.X86Blendvps, nNum, negInfMask, nMask);
                    mNum = context.AddIntrinsic(Intrinsic.X86Blendvps, mNum, negInfMask, mMask);

                    return context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxps : Intrinsic.X86Minps, nNum, mNum);
                }
                else /* if (sizeF == 1) */
                {
                    Operand negInfMask = X86GetAllElements(context, isMaxNum ? double.NegativeInfinity : double.PositiveInfinity);

                    Operand nMask = context.AddIntrinsic(Intrinsic.X86Andnpd, mQNaNMask, nQNaNMask);
                    Operand mMask = context.AddIntrinsic(Intrinsic.X86Andnpd, nQNaNMask, mQNaNMask);

                    nNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, nNum, negInfMask, nMask);
                    mNum = context.AddIntrinsic(Intrinsic.X86Blendvpd, mNum, negInfMask, mMask);

                    return context.AddIntrinsic(isMaxNum ? Intrinsic.X86Maxpd : Intrinsic.X86Minpd, nNum, mNum);
                }
            };

            if (scalar)
            {
                EmitScalarBinaryOpSimd32(context, genericEmit);
            }
            else
            {
                EmitVectorBinaryOpSimd32(context, genericEmit);
            }
        }
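
        // Carry-less (GF(2)) polynomial multiply: partial products are combined with XOR instead of
        // ADD, so no carries propagate between bit positions. Multiplying op2 by the single-bit mask
        // (op1 & (1 << i)) yields the i-th partial product (op2 << i, or zero) directly.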
        private static Operand EmitPolynomialMultiply(ArmEmitterContext context, Operand op1, Operand op2, int eSize)
        {
            Debug.Assert(eSize <= 32);

            Operand result = eSize == 32 ? Const(0L) : Const(0);

            if (eSize == 32)
            {
                op1 = context.ZeroExtend32(OperandType.I64, op1);
                op2 = context.ZeroExtend32(OperandType.I64, op2);
            }

            for (int i = 0; i < eSize; i++)
            {
                Operand mask = context.BitwiseAnd(op1, Const(op1.Type, 1L << i));

                result = context.BitwiseExclusiveOr(result, context.Multiply(op2, mask));
            }

            return result;
        }
    }
}