InstEmitSimdShift.cs 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161
  1. // https://github.com/intel/ARM_NEON_2_x86_SSE/blob/master/NEON_2_SSE.h
  2. using ARMeilleure.Decoders;
  3. using ARMeilleure.IntermediateRepresentation;
  4. using ARMeilleure.Translation;
  5. using System;
  6. using static ARMeilleure.Instructions.InstEmitHelper;
  7. using static ARMeilleure.Instructions.InstEmitSimdHelper;
  8. using static ARMeilleure.IntermediateRepresentation.OperandHelper;
  9. namespace ARMeilleure.Instructions
  10. {
  11. using Func2I = Func<Operand, Operand, Operand>;
  12. static partial class InstEmit
  13. {
  14. #region "Masks"
        // PSHUFB index tables used by the Rshrn_V/Shrn_V fast paths: each byte selects
        // the low half of one widened element, packing the narrowed results together.
        // Indexed by op.Size (0 = byte, 1 = halfword, 2 = word destination elements).
        private static readonly long[] _masks_RshrnShrn = new long[]
        {
            14L << 56 | 12L << 48 | 10L << 40 | 08L << 32 | 06L << 24 | 04L << 16 | 02L << 8 | 00L << 0,
            13L << 56 | 12L << 48 | 09L << 40 | 08L << 32 | 05L << 24 | 04L << 16 | 01L << 8 | 00L << 0,
            11L << 56 | 10L << 48 | 09L << 40 | 08L << 32 | 03L << 24 | 02L << 16 | 01L << 8 | 00L << 0
        };
        private static readonly long[] _masks_SliSri = new long[] // Replication masks.
        {
            0x0101010101010101L, 0x0001000100010001L, 0x0000000100000001L, 0x0000000000000001L
        };
  25. #endregion
        /// <summary>
        /// RSHRN/RSHRN2 (vector): rounding shift right narrow. Each wide element of Rn is
        /// shifted right by the immediate with rounding, then narrowed into the lower
        /// (RSHRN) or upper (RSHRN2) half of Rd.
        /// </summary>
        public static void Rshrn_V(ArmEmitterContext context)
        {
            if (Optimizations.UseSsse3)
            {
                OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

                int shift = GetImmShr(op);

                // Rounding adds half of the last bit that will be shifted out.
                long roundConst = 1L << (shift - 1);

                Operand d = GetVec(op.Rd);
                Operand n = GetVec(op.Rn);

                // Keep the low 64 bits of d (preserved by the "2" variant), zero the rest.
                Operand dLow = context.AddIntrinsic(Intrinsic.X86Movlhps, d, context.VectorZero());

                Operand mask = null;

                // Broadcast the round constant to every wide source element. For 16-bit
                // source elements the constant is replicated into both halves of a 32-bit lane.
                switch (op.Size + 1)
                {
                    case 1: mask = X86GetAllElements(context, (int)roundConst * 0x00010001); break;
                    case 2: mask = X86GetAllElements(context, (int)roundConst); break;
                    case 3: mask = X86GetAllElements(context, roundConst); break;
                }

                Intrinsic addInst = X86PaddInstruction[op.Size + 1];

                Operand res = context.AddIntrinsic(addInst, n, mask);

                Intrinsic srlInst = X86PsrlInstruction[op.Size + 1];

                res = context.AddIntrinsic(srlInst, res, Const(shift));

                // Gather the low half of every shifted wide element into a packed 64-bit group.
                Operand mask2 = X86GetAllElements(context, _masks_RshrnShrn[op.Size]);

                res = context.AddIntrinsic(Intrinsic.X86Pshufb, res, mask2);

                Intrinsic movInst = op.RegisterSize == RegisterSize.Simd128
                    ? Intrinsic.X86Movlhps  // RSHRN2: keep d's low half, write result high.
                    : Intrinsic.X86Movhlps; // RSHRN: write result low, upper half zeroed.

                res = context.AddIntrinsic(movInst, dLow, res);

                context.Copy(d, res);
            }
            else
            {
                EmitVectorShrImmNarrowOpZx(context, round: true);
            }
        }
  60. public static void Shl_S(ArmEmitterContext context)
  61. {
  62. OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
  63. int shift = GetImmShl(op);
  64. EmitScalarUnaryOpZx(context, (op1) => context.ShiftLeft(op1, Const(shift)));
  65. }
        /// <summary>
        /// SHL (vector): shift left by immediate. The SSE2 path requires op.Size > 0
        /// because x86 has no packed byte shift instruction.
        /// </summary>
        public static void Shl_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            int shift = GetImmShl(op);

            if (Optimizations.UseSse2 && op.Size > 0)
            {
                Operand n = GetVec(op.Rn);

                Intrinsic sllInst = X86PsllInstruction[op.Size];

                Operand res = context.AddIntrinsic(sllInst, n, Const(shift));

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitVectorUnaryOpZx(context, (op1) => context.ShiftLeft(op1, Const(shift)));
            }
        }
        /// <summary>
        /// SHLL/SHLL2 (vector): widen each element to double width and shift it left by
        /// exactly one (source) element size.
        /// </summary>
        public static void Shll_V(ArmEmitterContext context)
        {
            OpCodeSimd op = (OpCodeSimd)context.CurrOp;

            int shift = 8 << op.Size; // The shift amount is the source element size in bits.

            if (Optimizations.UseSse41)
            {
                Operand n = GetVec(op.Rn);

                if (op.RegisterSize == RegisterSize.Simd128)
                {
                    // The "2" variant reads the upper 64 bits of the source.
                    n = context.AddIntrinsic(Intrinsic.X86Psrldq, n, Const(8));
                }

                // Sign- vs. zero-extension is irrelevant here: the extended bits are
                // entirely overwritten by the following shift of one element size.
                Intrinsic movsxInst = X86PmovsxInstruction[op.Size];

                Operand res = context.AddIntrinsic(movsxInst, n);

                Intrinsic sllInst = X86PsllInstruction[op.Size + 1];

                res = context.AddIntrinsic(sllInst, res, Const(shift));

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitVectorShImmWidenBinaryZx(context, (op1, op2) => context.ShiftLeft(op1, op2), shift);
            }
        }
        /// <summary>
        /// SHRN/SHRN2 (vector): shift each wide element of Rn right by the immediate
        /// (truncating) and narrow into the lower (SHRN) or upper (SHRN2) half of Rd.
        /// </summary>
        public static void Shrn_V(ArmEmitterContext context)
        {
            if (Optimizations.UseSsse3)
            {
                OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

                int shift = GetImmShr(op);

                Operand d = GetVec(op.Rd);
                Operand n = GetVec(op.Rn);

                // Keep the low 64 bits of d (preserved by the "2" variant), zero the rest.
                Operand dLow = context.AddIntrinsic(Intrinsic.X86Movlhps, d, context.VectorZero());

                Intrinsic srlInst = X86PsrlInstruction[op.Size + 1];

                Operand nShifted = context.AddIntrinsic(srlInst, n, Const(shift));

                // Gather the low half of every shifted wide element into a packed 64-bit group.
                Operand mask = X86GetAllElements(context, _masks_RshrnShrn[op.Size]);

                Operand res = context.AddIntrinsic(Intrinsic.X86Pshufb, nShifted, mask);

                Intrinsic movInst = op.RegisterSize == RegisterSize.Simd128
                    ? Intrinsic.X86Movlhps  // SHRN2: keep d's low half, write result high.
                    : Intrinsic.X86Movhlps; // SHRN: write result low, upper half zeroed.

                res = context.AddIntrinsic(movInst, dLow, res);

                context.Copy(d, res);
            }
            else
            {
                EmitVectorShrImmNarrowOpZx(context, round: false);
            }
        }
  132. public static void Sli_S(ArmEmitterContext context)
  133. {
  134. EmitSli(context, scalar: true);
  135. }
  136. public static void Sli_V(ArmEmitterContext context)
  137. {
  138. EmitSli(context, scalar: false);
  139. }
  140. public static void Sqrshl_V(ArmEmitterContext context)
  141. {
  142. OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
  143. Operand res = context.VectorZero();
  144. int elems = op.GetBytesCount() >> op.Size;
  145. for (int index = 0; index < elems; index++)
  146. {
  147. Operand ne = EmitVectorExtractSx(context, op.Rn, index, op.Size);
  148. Operand me = EmitVectorExtractSx(context, op.Rm, index, op.Size);
  149. Operand e = context.Call(new _S64_S64_S64_Bool_S32(SoftFallback.SignedShlRegSatQ), ne, me, Const(1), Const(op.Size));
  150. res = EmitVectorInsert(context, res, e, index, op.Size);
  151. }
  152. context.Copy(GetVec(op.Rd), res);
  153. }
  154. public static void Sqrshrn_S(ArmEmitterContext context)
  155. {
  156. EmitRoundShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.ScalarSxSx);
  157. }
  158. public static void Sqrshrn_V(ArmEmitterContext context)
  159. {
  160. EmitRoundShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.VectorSxSx);
  161. }
  162. public static void Sqrshrun_S(ArmEmitterContext context)
  163. {
  164. EmitRoundShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.ScalarSxZx);
  165. }
  166. public static void Sqrshrun_V(ArmEmitterContext context)
  167. {
  168. EmitRoundShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.VectorSxZx);
  169. }
  170. public static void Sqshl_V(ArmEmitterContext context)
  171. {
  172. OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
  173. Operand res = context.VectorZero();
  174. int elems = op.GetBytesCount() >> op.Size;
  175. for (int index = 0; index < elems; index++)
  176. {
  177. Operand ne = EmitVectorExtractSx(context, op.Rn, index, op.Size);
  178. Operand me = EmitVectorExtractSx(context, op.Rm, index, op.Size);
  179. Operand e = context.Call(new _S64_S64_S64_Bool_S32(SoftFallback.SignedShlRegSatQ), ne, me, Const(0), Const(op.Size));
  180. res = EmitVectorInsert(context, res, e, index, op.Size);
  181. }
  182. context.Copy(GetVec(op.Rd), res);
  183. }
  184. public static void Sqshrn_S(ArmEmitterContext context)
  185. {
  186. EmitShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.ScalarSxSx);
  187. }
  188. public static void Sqshrn_V(ArmEmitterContext context)
  189. {
  190. EmitShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.VectorSxSx);
  191. }
  192. public static void Sqshrun_S(ArmEmitterContext context)
  193. {
  194. EmitShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.ScalarSxZx);
  195. }
  196. public static void Sqshrun_V(ArmEmitterContext context)
  197. {
  198. EmitShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.VectorSxZx);
  199. }
  200. public static void Sri_S(ArmEmitterContext context)
  201. {
  202. EmitSri(context, scalar: true);
  203. }
  204. public static void Sri_V(ArmEmitterContext context)
  205. {
  206. EmitSri(context, scalar: false);
  207. }
  208. public static void Srshl_V(ArmEmitterContext context)
  209. {
  210. OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
  211. Operand res = context.VectorZero();
  212. int elems = op.GetBytesCount() >> op.Size;
  213. for (int index = 0; index < elems; index++)
  214. {
  215. Operand ne = EmitVectorExtractSx(context, op.Rn, index, op.Size);
  216. Operand me = EmitVectorExtractSx(context, op.Rm, index, op.Size);
  217. Operand e = context.Call(new _S64_S64_S64_Bool_S32(SoftFallback.SignedShlReg), ne, me, Const(1), Const(op.Size));
  218. res = EmitVectorInsert(context, res, e, index, op.Size);
  219. }
  220. context.Copy(GetVec(op.Rd), res);
  221. }
  222. public static void Srshr_S(ArmEmitterContext context)
  223. {
  224. EmitScalarShrImmOpSx(context, ShrImmFlags.Round);
  225. }
        /// <summary>
        /// SRSHR (vector): signed rounding shift right by immediate. The SSE2 path covers
        /// element sizes 1 and 2 only: x86 has no packed byte shifts and no 64-bit PSRA.
        /// </summary>
        public static void Srshr_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
            {
                int shift = GetImmShr(op);
                int eSize = 8 << op.Size;

                Operand n = GetVec(op.Rn);

                Intrinsic sllInst = X86PsllInstruction[op.Size];

                // Isolate the rounding bit (bit shift-1 of n): move it to the top of the
                // element, then logically shift it down to bit 0.
                Operand res = context.AddIntrinsic(sllInst, n, Const(eSize - shift));

                Intrinsic srlInst = X86PsrlInstruction[op.Size];

                res = context.AddIntrinsic(srlInst, res, Const(eSize - 1));

                Intrinsic sraInst = X86PsraInstruction[op.Size];

                Operand nSra = context.AddIntrinsic(sraInst, n, Const(shift));

                Intrinsic addInst = X86PaddInstruction[op.Size];

                // Rounded result = (n >> shift) + rounding bit.
                res = context.AddIntrinsic(addInst, res, nSra);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitVectorShrImmOpSx(context, ShrImmFlags.Round);
            }
        }
  253. public static void Srsra_S(ArmEmitterContext context)
  254. {
  255. EmitScalarShrImmOpSx(context, ShrImmFlags.Round | ShrImmFlags.Accumulate);
  256. }
        /// <summary>
        /// SRSRA (vector): signed rounding shift right by immediate and accumulate into Rd.
        /// Same rounding trick as Srshr_V, plus the final accumulate; element sizes 1 and 2
        /// only (no packed byte shifts, no 64-bit PSRA on x86).
        /// </summary>
        public static void Srsra_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
            {
                int shift = GetImmShr(op);
                int eSize = 8 << op.Size;

                Operand d = GetVec(op.Rd);
                Operand n = GetVec(op.Rn);

                Intrinsic sllInst = X86PsllInstruction[op.Size];

                // Isolate the rounding bit (bit shift-1 of n).
                Operand res = context.AddIntrinsic(sllInst, n, Const(eSize - shift));

                Intrinsic srlInst = X86PsrlInstruction[op.Size];

                res = context.AddIntrinsic(srlInst, res, Const(eSize - 1));

                Intrinsic sraInst = X86PsraInstruction[op.Size];

                Operand nSra = context.AddIntrinsic(sraInst, n, Const(shift));

                Intrinsic addInst = X86PaddInstruction[op.Size];

                // result = d + ((n >> shift) + rounding bit).
                res = context.AddIntrinsic(addInst, res, nSra);
                res = context.AddIntrinsic(addInst, res, d);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(d, res);
            }
            else
            {
                EmitVectorShrImmOpSx(context, ShrImmFlags.Round | ShrImmFlags.Accumulate);
            }
        }
  286. public static void Sshl_V(ArmEmitterContext context)
  287. {
  288. OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
  289. Operand res = context.VectorZero();
  290. int elems = op.GetBytesCount() >> op.Size;
  291. for (int index = 0; index < elems; index++)
  292. {
  293. Operand ne = EmitVectorExtractSx(context, op.Rn, index, op.Size);
  294. Operand me = EmitVectorExtractSx(context, op.Rm, index, op.Size);
  295. Operand e = context.Call(new _S64_S64_S64_Bool_S32(SoftFallback.SignedShlReg), ne, me, Const(0), Const(op.Size));
  296. res = EmitVectorInsert(context, res, e, index, op.Size);
  297. }
  298. context.Copy(GetVec(op.Rd), res);
  299. }
        /// <summary>
        /// SSHLL/SSHLL2 (vector): sign-extend each element to double width, then shift
        /// left by the immediate.
        /// </summary>
        public static void Sshll_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            int shift = GetImmShl(op);

            if (Optimizations.UseSse41)
            {
                Operand n = GetVec(op.Rn);

                if (op.RegisterSize == RegisterSize.Simd128)
                {
                    // The "2" variant reads the upper 64 bits of the source.
                    n = context.AddIntrinsic(Intrinsic.X86Psrldq, n, Const(8));
                }

                Intrinsic movsxInst = X86PmovsxInstruction[op.Size];

                Operand res = context.AddIntrinsic(movsxInst, n);

                if (shift != 0)
                {
                    Intrinsic sllInst = X86PsllInstruction[op.Size + 1];

                    res = context.AddIntrinsic(sllInst, res, Const(shift));
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitVectorShImmWidenBinarySx(context, (op1, op2) => context.ShiftLeft(op1, op2), shift);
            }
        }
  325. public static void Sshr_S(ArmEmitterContext context)
  326. {
  327. EmitShrImmOp(context, ShrImmFlags.ScalarSx);
  328. }
        /// <summary>
        /// SSHR (vector): arithmetic shift right by immediate. The SSE2 path covers element
        /// sizes 1 and 2 only (x86 has no packed byte shift and no 64-bit PSRA).
        /// </summary>
        public static void Sshr_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
            {
                int shift = GetImmShr(op);

                Operand n = GetVec(op.Rn);

                Intrinsic sraInst = X86PsraInstruction[op.Size];

                Operand res = context.AddIntrinsic(sraInst, n, Const(shift));

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitShrImmOp(context, ShrImmFlags.VectorSx);
            }
        }
  349. public static void Ssra_S(ArmEmitterContext context)
  350. {
  351. EmitScalarShrImmOpSx(context, ShrImmFlags.Accumulate);
  352. }
        /// <summary>
        /// SSRA (vector): arithmetic shift right by immediate and accumulate into Rd.
        /// Element sizes 1 and 2 only on the SSE2 path (no byte shifts, no 64-bit PSRA).
        /// </summary>
        public static void Ssra_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            if (Optimizations.UseSse2 && op.Size > 0 && op.Size < 3)
            {
                int shift = GetImmShr(op);

                Operand d = GetVec(op.Rd);
                Operand n = GetVec(op.Rn);

                Intrinsic sraInst = X86PsraInstruction[op.Size];

                Operand res = context.AddIntrinsic(sraInst, n, Const(shift));

                Intrinsic addInst = X86PaddInstruction[op.Size];

                // Accumulate: result = d + (n >> shift).
                res = context.AddIntrinsic(addInst, res, d);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(d, res);
            }
            else
            {
                EmitVectorShrImmOpSx(context, ShrImmFlags.Accumulate);
            }
        }
  376. public static void Uqrshl_V(ArmEmitterContext context)
  377. {
  378. OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
  379. Operand res = context.VectorZero();
  380. int elems = op.GetBytesCount() >> op.Size;
  381. for (int index = 0; index < elems; index++)
  382. {
  383. Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
  384. Operand me = EmitVectorExtractZx(context, op.Rm, index, op.Size);
  385. Operand e = context.Call(new _U64_U64_U64_Bool_S32(SoftFallback.UnsignedShlRegSatQ), ne, me, Const(1), Const(op.Size));
  386. res = EmitVectorInsert(context, res, e, index, op.Size);
  387. }
  388. context.Copy(GetVec(op.Rd), res);
  389. }
  390. public static void Uqrshrn_S(ArmEmitterContext context)
  391. {
  392. EmitRoundShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.ScalarZxZx);
  393. }
  394. public static void Uqrshrn_V(ArmEmitterContext context)
  395. {
  396. EmitRoundShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.VectorZxZx);
  397. }
  398. public static void Uqshl_V(ArmEmitterContext context)
  399. {
  400. OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
  401. Operand res = context.VectorZero();
  402. int elems = op.GetBytesCount() >> op.Size;
  403. for (int index = 0; index < elems; index++)
  404. {
  405. Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
  406. Operand me = EmitVectorExtractZx(context, op.Rm, index, op.Size);
  407. Operand e = context.Call(new _U64_U64_U64_Bool_S32(SoftFallback.UnsignedShlRegSatQ), ne, me, Const(0), Const(op.Size));
  408. res = EmitVectorInsert(context, res, e, index, op.Size);
  409. }
  410. context.Copy(GetVec(op.Rd), res);
  411. }
  412. public static void Uqshrn_S(ArmEmitterContext context)
  413. {
  414. EmitShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.ScalarZxZx);
  415. }
  416. public static void Uqshrn_V(ArmEmitterContext context)
  417. {
  418. EmitShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.VectorZxZx);
  419. }
  420. public static void Urshl_V(ArmEmitterContext context)
  421. {
  422. OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
  423. Operand res = context.VectorZero();
  424. int elems = op.GetBytesCount() >> op.Size;
  425. for (int index = 0; index < elems; index++)
  426. {
  427. Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
  428. Operand me = EmitVectorExtractZx(context, op.Rm, index, op.Size);
  429. Operand e = context.Call(new _U64_U64_U64_Bool_S32(SoftFallback.UnsignedShlReg), ne, me, Const(1), Const(op.Size));
  430. res = EmitVectorInsert(context, res, e, index, op.Size);
  431. }
  432. context.Copy(GetVec(op.Rd), res);
  433. }
  434. public static void Urshr_S(ArmEmitterContext context)
  435. {
  436. EmitScalarShrImmOpZx(context, ShrImmFlags.Round);
  437. }
        /// <summary>
        /// URSHR (vector): unsigned rounding shift right by immediate. The SSE2 path only
        /// excludes byte elements (op.Size == 0): logical shifts exist for 16/32/64-bit
        /// elements, unlike the arithmetic case.
        /// </summary>
        public static void Urshr_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            if (Optimizations.UseSse2 && op.Size > 0)
            {
                int shift = GetImmShr(op);
                int eSize = 8 << op.Size;

                Operand n = GetVec(op.Rn);

                Intrinsic sllInst = X86PsllInstruction[op.Size];

                // Isolate the rounding bit (bit shift-1 of n): move it to the top of the
                // element, then logically shift it down to bit 0.
                Operand res = context.AddIntrinsic(sllInst, n, Const(eSize - shift));

                Intrinsic srlInst = X86PsrlInstruction[op.Size];

                res = context.AddIntrinsic(srlInst, res, Const(eSize - 1));

                Operand nSrl = context.AddIntrinsic(srlInst, n, Const(shift));

                Intrinsic addInst = X86PaddInstruction[op.Size];

                // Rounded result = (n >>> shift) + rounding bit.
                res = context.AddIntrinsic(addInst, res, nSrl);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitVectorShrImmOpZx(context, ShrImmFlags.Round);
            }
        }
  464. public static void Ursra_S(ArmEmitterContext context)
  465. {
  466. EmitScalarShrImmOpZx(context, ShrImmFlags.Round | ShrImmFlags.Accumulate);
  467. }
        /// <summary>
        /// URSRA (vector): unsigned rounding shift right by immediate and accumulate into
        /// Rd. Same rounding trick as Urshr_V, plus the final accumulate.
        /// </summary>
        public static void Ursra_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            if (Optimizations.UseSse2 && op.Size > 0)
            {
                int shift = GetImmShr(op);
                int eSize = 8 << op.Size;

                Operand d = GetVec(op.Rd);
                Operand n = GetVec(op.Rn);

                Intrinsic sllInst = X86PsllInstruction[op.Size];

                // Isolate the rounding bit (bit shift-1 of n).
                Operand res = context.AddIntrinsic(sllInst, n, Const(eSize - shift));

                Intrinsic srlInst = X86PsrlInstruction[op.Size];

                res = context.AddIntrinsic(srlInst, res, Const(eSize - 1));

                Operand nSrl = context.AddIntrinsic(srlInst, n, Const(shift));

                Intrinsic addInst = X86PaddInstruction[op.Size];

                // result = d + ((n >>> shift) + rounding bit).
                res = context.AddIntrinsic(addInst, res, nSrl);
                res = context.AddIntrinsic(addInst, res, d);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(d, res);
            }
            else
            {
                EmitVectorShrImmOpZx(context, ShrImmFlags.Round | ShrImmFlags.Accumulate);
            }
        }
  496. public static void Ushl_V(ArmEmitterContext context)
  497. {
  498. OpCodeSimdReg op = (OpCodeSimdReg)context.CurrOp;
  499. Operand res = context.VectorZero();
  500. int elems = op.GetBytesCount() >> op.Size;
  501. for (int index = 0; index < elems; index++)
  502. {
  503. Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
  504. Operand me = EmitVectorExtractZx(context, op.Rm, index, op.Size);
  505. Operand e = context.Call(new _U64_U64_U64_Bool_S32(SoftFallback.UnsignedShlReg), ne, me, Const(0), Const(op.Size));
  506. res = EmitVectorInsert(context, res, e, index, op.Size);
  507. }
  508. context.Copy(GetVec(op.Rd), res);
  509. }
        /// <summary>
        /// USHLL/USHLL2 (vector): zero-extend each element to double width, then shift
        /// left by the immediate.
        /// </summary>
        public static void Ushll_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            int shift = GetImmShl(op);

            if (Optimizations.UseSse41)
            {
                Operand n = GetVec(op.Rn);

                if (op.RegisterSize == RegisterSize.Simd128)
                {
                    // The "2" variant reads the upper 64 bits of the source.
                    n = context.AddIntrinsic(Intrinsic.X86Psrldq, n, Const(8));
                }

                Intrinsic movzxInst = X86PmovzxInstruction[op.Size];

                Operand res = context.AddIntrinsic(movzxInst, n);

                if (shift != 0)
                {
                    Intrinsic sllInst = X86PsllInstruction[op.Size + 1];

                    res = context.AddIntrinsic(sllInst, res, Const(shift));
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitVectorShImmWidenBinaryZx(context, (op1, op2) => context.ShiftLeft(op1, op2), shift);
            }
        }
  535. public static void Ushr_S(ArmEmitterContext context)
  536. {
  537. EmitShrImmOp(context, ShrImmFlags.ScalarZx);
  538. }
        /// <summary>
        /// USHR (vector): logical shift right by immediate. The SSE2 path only excludes
        /// byte elements (no packed byte shift on x86).
        /// </summary>
        public static void Ushr_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            if (Optimizations.UseSse2 && op.Size > 0)
            {
                int shift = GetImmShr(op);

                Operand n = GetVec(op.Rn);

                Intrinsic srlInst = X86PsrlInstruction[op.Size];

                Operand res = context.AddIntrinsic(srlInst, n, Const(shift));

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(GetVec(op.Rd), res);
            }
            else
            {
                EmitShrImmOp(context, ShrImmFlags.VectorZx);
            }
        }
  559. public static void Usra_S(ArmEmitterContext context)
  560. {
  561. EmitScalarShrImmOpZx(context, ShrImmFlags.Accumulate);
  562. }
        /// <summary>
        /// USRA (vector): logical shift right by immediate and accumulate into Rd.
        /// Byte elements excluded from the SSE2 path (no packed byte shift on x86).
        /// </summary>
        public static void Usra_V(ArmEmitterContext context)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            if (Optimizations.UseSse2 && op.Size > 0)
            {
                int shift = GetImmShr(op);

                Operand d = GetVec(op.Rd);
                Operand n = GetVec(op.Rn);

                Intrinsic srlInst = X86PsrlInstruction[op.Size];

                Operand res = context.AddIntrinsic(srlInst, n, Const(shift));

                Intrinsic addInst = X86PaddInstruction[op.Size];

                // Accumulate: result = d + (n >>> shift).
                res = context.AddIntrinsic(addInst, res, d);

                if (op.RegisterSize == RegisterSize.Simd64)
                {
                    res = context.VectorZeroUpper64(res);
                }

                context.Copy(d, res);
            }
            else
            {
                EmitVectorShrImmOpZx(context, ShrImmFlags.Accumulate);
            }
        }
        // Option flags for the shared immediate shift-right emitter (EmitShrImmOp).
        [Flags]
        private enum ShrImmFlags
        {
            Scalar = 1 << 0,     // Operate on a single element only.
            Signed = 1 << 1,     // Use arithmetic (sign-extending) shifts.
            Round = 1 << 2,      // Add the rounding constant before shifting.
            Accumulate = 1 << 3, // Add the shifted result to the destination element.

            ScalarSx = Scalar | Signed,
            ScalarZx = Scalar,

            VectorSx = Signed,
            VectorZx = 0
        }
  598. private static void EmitScalarShrImmOpSx(ArmEmitterContext context, ShrImmFlags flags)
  599. {
  600. EmitShrImmOp(context, ShrImmFlags.ScalarSx | flags);
  601. }
  602. private static void EmitScalarShrImmOpZx(ArmEmitterContext context, ShrImmFlags flags)
  603. {
  604. EmitShrImmOp(context, ShrImmFlags.ScalarZx | flags);
  605. }
  606. private static void EmitVectorShrImmOpSx(ArmEmitterContext context, ShrImmFlags flags)
  607. {
  608. EmitShrImmOp(context, ShrImmFlags.VectorSx | flags);
  609. }
  610. private static void EmitVectorShrImmOpZx(ArmEmitterContext context, ShrImmFlags flags)
  611. {
  612. EmitShrImmOp(context, ShrImmFlags.VectorZx | flags);
  613. }
        /// <summary>
        /// Shared emitter for the immediate shift-right family (SSHR/USHR/SRSHR/URSHR and
        /// their accumulating forms, scalar and vector), parameterized by flags.
        /// </summary>
        private static void EmitShrImmOp(ArmEmitterContext context, ShrImmFlags flags)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            Operand res = context.VectorZero();

            bool scalar = (flags & ShrImmFlags.Scalar) != 0;
            bool signed = (flags & ShrImmFlags.Signed) != 0;
            bool round = (flags & ShrImmFlags.Round) != 0;
            bool accumulate = (flags & ShrImmFlags.Accumulate) != 0;

            int shift = GetImmShr(op);

            // Rounding adds half of the last bit that will be shifted out.
            long roundConst = 1L << (shift - 1);

            int elems = !scalar ? op.GetBytesCount() >> op.Size : 1;

            for (int index = 0; index < elems; index++)
            {
                Operand e = EmitVectorExtract(context, op.Rn, index, op.Size, signed);

                if (op.Size <= 2)
                {
                    if (round)
                    {
                        e = context.Add(e, Const(roundConst));
                    }

                    e = signed
                        ? context.ShiftRightSI(e, Const(shift))
                        : context.ShiftRightUI(e, Const(shift));
                }
                else /* if (op.Size == 3) */
                {
                    // 64-bit elements: the rounding add could carry past 64 bits, so use
                    // the software helper instead of a plain add + shift.
                    e = EmitShrImm64(context, e, signed, round ? roundConst : 0L, shift);
                }

                if (accumulate)
                {
                    Operand de = EmitVectorExtract(context, op.Rd, index, op.Size, signed);

                    e = context.Add(e, de);
                }

                res = EmitVectorInsert(context, res, e, index, op.Size);
            }

            context.Copy(GetVec(op.Rd), res);
        }
        /// <summary>
        /// Fallback for SHRN/RSHRN: shift each wide element of Rn right by the immediate
        /// (optionally with rounding) and narrow into the lower half of Rd, or into the
        /// upper half (preserving the lower) for the "2" variant.
        /// </summary>
        private static void EmitVectorShrImmNarrowOpZx(ArmEmitterContext context, bool round)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            int shift = GetImmShr(op);

            long roundConst = 1L << (shift - 1);

            int elems = 8 >> op.Size;

            // Simd128 here means the "2" variant: write the upper destination elements.
            int part = op.RegisterSize == RegisterSize.Simd128 ? elems : 0;

            Operand res = part == 0 ? context.VectorZero() : context.Copy(GetVec(op.Rd));

            for (int index = 0; index < elems; index++)
            {
                // Source elements are one size class wider than the destination.
                Operand e = EmitVectorExtractZx(context, op.Rn, index, op.Size + 1);

                if (round)
                {
                    e = context.Add(e, Const(roundConst));
                }

                e = context.ShiftRightUI(e, Const(shift));

                res = EmitVectorInsert(context, res, e, part + index, op.Size);
            }

            context.Copy(GetVec(op.Rd), res);
        }
        // Option flags for the shared saturating shift-right-narrow emitter.
        [Flags]
        private enum ShrImmSaturatingNarrowFlags
        {
            Scalar = 1 << 0,    // Operate on a single element only.
            SignedSrc = 1 << 1, // Source elements are signed.
            SignedDst = 1 << 2, // Saturate to the signed destination range.
            Round = 1 << 3,     // Add the rounding constant before shifting.

            ScalarSxSx = Scalar | SignedSrc | SignedDst,
            ScalarSxZx = Scalar | SignedSrc,
            ScalarZxZx = Scalar,

            VectorSxSx = SignedSrc | SignedDst,
            VectorSxZx = SignedSrc,
            VectorZxZx = 0
        }
  685. private static void EmitRoundShrImmSaturatingNarrowOp(ArmEmitterContext context, ShrImmSaturatingNarrowFlags flags)
  686. {
  687. EmitShrImmSaturatingNarrowOp(context, ShrImmSaturatingNarrowFlags.Round | flags);
  688. }
        /// <summary>
        /// Shared emitter for the saturating shift-right-narrow family (SQSHRN, SQSHRUN,
        /// UQSHRN and their rounding/scalar forms), parameterized by flags.
        /// </summary>
        private static void EmitShrImmSaturatingNarrowOp(ArmEmitterContext context, ShrImmSaturatingNarrowFlags flags)
        {
            OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;

            bool scalar = (flags & ShrImmSaturatingNarrowFlags.Scalar) != 0;
            bool signedSrc = (flags & ShrImmSaturatingNarrowFlags.SignedSrc) != 0;
            bool signedDst = (flags & ShrImmSaturatingNarrowFlags.SignedDst) != 0;
            bool round = (flags & ShrImmSaturatingNarrowFlags.Round) != 0;

            int shift = GetImmShr(op);

            // Rounding adds half of the last bit that will be shifted out.
            long roundConst = 1L << (shift - 1);

            int elems = !scalar ? 8 >> op.Size : 1;

            // The vector "2" variant writes the upper half and preserves the lower half of Rd.
            int part = !scalar && (op.RegisterSize == RegisterSize.Simd128) ? elems : 0;

            Operand res = part == 0 ? context.VectorZero() : context.Copy(GetVec(op.Rd));

            for (int index = 0; index < elems; index++)
            {
                // Source elements are one size class wider than the destination.
                Operand e = EmitVectorExtract(context, op.Rn, index, op.Size + 1, signedSrc);

                if (op.Size <= 1 || !round)
                {
                    if (round)
                    {
                        e = context.Add(e, Const(roundConst));
                    }

                    e = signedSrc
                        ? context.ShiftRightSI(e, Const(shift))
                        : context.ShiftRightUI(e, Const(shift));
                }
                else /* if (op.Size == 2 && round) */
                {
                    // 64-bit source elements: the rounding add could carry past 64 bits,
                    // so use the software helper.
                    e = EmitShrImm64(context, e, signedSrc, roundConst, shift); // shift <= 32
                }

                e = EmitSatQ(context, e, op.Size, signedSrc, signedDst);

                res = EmitVectorInsert(context, res, e, part + index, op.Size);
            }

            context.Copy(GetVec(op.Rd), res);
        }
        // dst64 = (Int(src64, signed) + roundConst) >> shift;
        // Software helper for 64-bit right shifts with rounding, delegating to
        // SoftFallback.{Signed,Unsigned}ShrImm64 (implementations not in this file).
        private static Operand EmitShrImm64(
            ArmEmitterContext context,
            Operand value,
            bool signed,
            long roundConst,
            int shift)
        {
            // Pick the signed or unsigned fallback; both take (value, roundConst, shift).
            Delegate dlg = signed
                ? (Delegate)new _S64_S64_S64_S32(SoftFallback.SignedShrImm64)
                : (Delegate)new _U64_U64_S64_S32(SoftFallback.UnsignedShrImm64);

            return context.Call(dlg, value, Const(roundConst), Const(shift));
        }
  736. private static void EmitVectorShImmWidenBinarySx(ArmEmitterContext context, Func2I emit, int imm)
  737. {
  738. EmitVectorShImmWidenBinaryOp(context, emit, imm, signed: true);
  739. }
  740. private static void EmitVectorShImmWidenBinaryZx(ArmEmitterContext context, Func2I emit, int imm)
  741. {
  742. EmitVectorShImmWidenBinaryOp(context, emit, imm, signed: false);
  743. }
  744. private static void EmitVectorShImmWidenBinaryOp(ArmEmitterContext context, Func2I emit, int imm, bool signed)
  745. {
  746. OpCodeSimd op = (OpCodeSimd)context.CurrOp;
  747. Operand res = context.VectorZero();
  748. int elems = 8 >> op.Size;
  749. int part = op.RegisterSize == RegisterSize.Simd128 ? elems : 0;
  750. for (int index = 0; index < elems; index++)
  751. {
  752. Operand ne = EmitVectorExtract(context, op.Rn, part + index, op.Size, signed);
  753. res = EmitVectorInsert(context, res, emit(ne, Const(imm)), index, op.Size + 1);
  754. }
  755. context.Copy(GetVec(op.Rd), res);
  756. }
  757. private static void EmitSli(ArmEmitterContext context, bool scalar)
  758. {
  759. OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
  760. int shift = GetImmShl(op);
  761. ulong mask = shift != 0 ? ulong.MaxValue >> (64 - shift) : 0UL;
  762. if (Optimizations.UseSse2 && op.Size > 0)
  763. {
  764. Operand d = GetVec(op.Rd);
  765. Operand n = GetVec(op.Rn);
  766. Intrinsic sllInst = X86PsllInstruction[op.Size];
  767. Operand nShifted = context.AddIntrinsic(sllInst, n, Const(shift));
  768. Operand dMask = X86GetAllElements(context, (long)mask * _masks_SliSri[op.Size]);
  769. Operand dMasked = context.AddIntrinsic(Intrinsic.X86Pand, d, dMask);
  770. Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, dMasked);
  771. if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
  772. {
  773. res = context.VectorZeroUpper64(res);
  774. }
  775. context.Copy(d, res);
  776. }
  777. else
  778. {
  779. Operand res = context.VectorZero();
  780. int elems = !scalar ? op.GetBytesCount() >> op.Size : 1;
  781. for (int index = 0; index < elems; index++)
  782. {
  783. Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
  784. Operand neShifted = context.ShiftLeft(ne, Const(shift));
  785. Operand de = EmitVectorExtractZx(context, op.Rd, index, op.Size);
  786. Operand deMasked = context.BitwiseAnd(de, Const(mask));
  787. Operand e = context.BitwiseOr(neShifted, deMasked);
  788. res = EmitVectorInsert(context, res, e, index, op.Size);
  789. }
  790. context.Copy(GetVec(op.Rd), res);
  791. }
  792. }
  793. private static void EmitSri(ArmEmitterContext context, bool scalar)
  794. {
  795. OpCodeSimdShImm op = (OpCodeSimdShImm)context.CurrOp;
  796. int shift = GetImmShr(op);
  797. int eSize = 8 << op.Size;
  798. ulong mask = (ulong.MaxValue << (eSize - shift)) & (ulong.MaxValue >> (64 - eSize));
  799. if (Optimizations.UseSse2 && op.Size > 0)
  800. {
  801. Operand d = GetVec(op.Rd);
  802. Operand n = GetVec(op.Rn);
  803. Intrinsic srlInst = X86PsrlInstruction[op.Size];
  804. Operand nShifted = context.AddIntrinsic(srlInst, n, Const(shift));
  805. Operand dMask = X86GetAllElements(context, (long)mask * _masks_SliSri[op.Size]);
  806. Operand dMasked = context.AddIntrinsic(Intrinsic.X86Pand, d, dMask);
  807. Operand res = context.AddIntrinsic(Intrinsic.X86Por, nShifted, dMasked);
  808. if ((op.RegisterSize == RegisterSize.Simd64) || scalar)
  809. {
  810. res = context.VectorZeroUpper64(res);
  811. }
  812. context.Copy(d, res);
  813. }
  814. else
  815. {
  816. Operand res = context.VectorZero();
  817. int elems = !scalar ? op.GetBytesCount() >> op.Size : 1;
  818. for (int index = 0; index < elems; index++)
  819. {
  820. Operand ne = EmitVectorExtractZx(context, op.Rn, index, op.Size);
  821. Operand neShifted = shift != 64 ? context.ShiftRightUI(ne, Const(shift)) : Const(0UL);
  822. Operand de = EmitVectorExtractZx(context, op.Rd, index, op.Size);
  823. Operand deMasked = context.BitwiseAnd(de, Const(mask));
  824. Operand e = context.BitwiseOr(neShifted, deMasked);
  825. res = EmitVectorInsert(context, res, e, index, op.Size);
  826. }
  827. context.Copy(GetVec(op.Rd), res);
  828. }
  829. }
  830. }
  831. }