// NvGpuFifo.cs
  1. using Ryujinx.Graphics.Gpu.State;
  2. namespace Ryujinx.Graphics.Gpu
  3. {
  4. class NvGpuFifo
  5. {
  6. private const int MacrosCount = 0x80;
  7. private const int MacroIndexMask = MacrosCount - 1;
  8. // Note: The size of the macro memory is unknown, we just make
  9. // a guess here and use 256kb as the size. Increase if needed.
  10. private const int MmeWords = 256 * 256;
  11. private GpuContext _context;
  12. private struct CachedMacro
  13. {
  14. public int Position { get; private set; }
  15. private bool _executionPending;
  16. private int _argument;
  17. private MacroInterpreter _interpreter;
  18. public CachedMacro(GpuContext context, NvGpuFifo fifo, int position)
  19. {
  20. Position = position;
  21. _executionPending = false;
  22. _argument = 0;
  23. _interpreter = new MacroInterpreter(context, fifo);
  24. }
  25. public void StartExecution(int argument)
  26. {
  27. _argument = argument;
  28. _executionPending = true;
  29. }
  30. public void Execute(int[] mme, GpuState state)
  31. {
  32. if (_executionPending)
  33. {
  34. _executionPending = false;
  35. _interpreter?.Execute(mme, Position, _argument, state);
  36. }
  37. }
  38. public void PushArgument(int argument)
  39. {
  40. _interpreter?.Fifo.Enqueue(argument);
  41. }
  42. }
  43. private int _currMacroPosition;
  44. private int _currMacroBindIndex;
  45. private CachedMacro[] _macros;
  46. private int[] _mme;
  47. private class SubChannel
  48. {
  49. public GpuState State { get; }
  50. public ClassId Class { get; set; }
  51. public SubChannel()
  52. {
  53. State = new GpuState();
  54. }
  55. }
  56. private SubChannel[] _subChannels;
  57. public NvGpuFifo(GpuContext context)
  58. {
  59. _context = context;
  60. _macros = new CachedMacro[MacrosCount];
  61. _mme = new int[MmeWords];
  62. _subChannels = new SubChannel[8];
  63. for (int index = 0; index < _subChannels.Length; index++)
  64. {
  65. _subChannels[index] = new SubChannel();
  66. context.Methods.RegisterCallbacks(_subChannels[index].State);
  67. }
  68. }
  69. public void CallMethod(MethodParams meth)
  70. {
  71. if ((NvGpuFifoMeth)meth.Method == NvGpuFifoMeth.BindChannel)
  72. {
  73. _subChannels[meth.SubChannel].Class = (ClassId)meth.Argument;
  74. }
  75. else if (meth.Method < 0x60)
  76. {
  77. switch ((NvGpuFifoMeth)meth.Method)
  78. {
  79. case NvGpuFifoMeth.WaitForIdle:
  80. {
  81. _context.Renderer.FlushPipelines();
  82. break;
  83. }
  84. case NvGpuFifoMeth.SetMacroUploadAddress:
  85. {
  86. _currMacroPosition = meth.Argument;
  87. break;
  88. }
  89. case NvGpuFifoMeth.SendMacroCodeData:
  90. {
  91. _mme[_currMacroPosition++] = meth.Argument;
  92. break;
  93. }
  94. case NvGpuFifoMeth.SetMacroBindingIndex:
  95. {
  96. _currMacroBindIndex = meth.Argument;
  97. break;
  98. }
  99. case NvGpuFifoMeth.BindMacro:
  100. {
  101. int position = meth.Argument;
  102. _macros[_currMacroBindIndex++] = new CachedMacro(_context, this, position);
  103. break;
  104. }
  105. }
  106. }
  107. else if (meth.Method < 0xe00)
  108. {
  109. _subChannels[meth.SubChannel].State.CallMethod(meth);
  110. }
  111. else
  112. {
  113. int macroIndex = (meth.Method >> 1) & MacroIndexMask;
  114. if ((meth.Method & 1) != 0)
  115. {
  116. _macros[macroIndex].PushArgument(meth.Argument);
  117. }
  118. else
  119. {
  120. _macros[macroIndex].StartExecution(meth.Argument);
  121. }
  122. if (meth.IsLastCall)
  123. {
  124. _macros[macroIndex].Execute(_mme, _subChannels[meth.SubChannel].State);
  125. _context.Methods.PerformDeferredDraws();
  126. }
  127. }
  128. }
  129. }
  130. }