// Pool.cs
  1. using Ryujinx.Cpu.Tracking;
  2. using System;
  3. namespace Ryujinx.Graphics.Gpu.Image
  4. {
  5. /// <summary>
  6. /// Represents a pool of GPU resources, such as samplers or textures.
  7. /// </summary>
  8. /// <typeparam name="T1">Type of the GPU resource</typeparam>
  9. /// <typeparam name="T2">Type of the descriptor</typeparam>
  10. abstract class Pool<T1, T2> : IDisposable where T2 : unmanaged
  11. {
  12. protected const int DescriptorSize = 0x20;
  13. protected GpuContext Context;
  14. protected T1[] Items;
  15. protected T2[] DescriptorCache;
  16. /// <summary>
  17. /// The maximum ID value of resources on the pool (inclusive).
  18. /// </summary>
  19. /// <remarks>
  20. /// The maximum amount of resources on the pool is equal to this value plus one.
  21. /// </remarks>
  22. public int MaximumId { get; }
  23. /// <summary>
  24. /// The address of the pool in guest memory.
  25. /// </summary>
  26. public ulong Address { get; }
  27. /// <summary>
  28. /// The size of the pool in bytes.
  29. /// </summary>
  30. public ulong Size { get; }
  31. private readonly CpuMultiRegionHandle _memoryTracking;
  32. private readonly Action<ulong, ulong> _modifiedDelegate;
  33. public Pool(GpuContext context, ulong address, int maximumId)
  34. {
  35. Context = context;
  36. MaximumId = maximumId;
  37. int count = maximumId + 1;
  38. ulong size = (ulong)(uint)count * DescriptorSize;
  39. Items = new T1[count];
  40. DescriptorCache = new T2[count];
  41. Address = address;
  42. Size = size;
  43. _memoryTracking = context.PhysicalMemory.BeginGranularTracking(address, size);
  44. _modifiedDelegate = RegionModified;
  45. }
  46. /// <summary>
  47. /// Gets the descriptor for a given ID.
  48. /// </summary>
  49. /// <param name="id">ID of the descriptor. This is effectively a zero-based index</param>
  50. /// <returns>The descriptor</returns>
  51. public T2 GetDescriptor(int id)
  52. {
  53. return Context.PhysicalMemory.Read<T2>(Address + (ulong)id * DescriptorSize);
  54. }
  55. /// <summary>
  56. /// Gets the GPU resource with the given ID.
  57. /// </summary>
  58. /// <param name="id">ID of the resource. This is effectively a zero-based index</param>
  59. /// <returns>The GPU resource with the given ID</returns>
  60. public abstract T1 Get(int id);
  61. /// <summary>
  62. /// Synchronizes host memory with guest memory.
  63. /// This causes invalidation of pool entries,
  64. /// if a modification of entries by the CPU is detected.
  65. /// </summary>
  66. public void SynchronizeMemory()
  67. {
  68. _memoryTracking.QueryModified(_modifiedDelegate);
  69. }
  70. /// <summary>
  71. /// Indicate that a region of the pool was modified, and must be loaded from memory.
  72. /// </summary>
  73. /// <param name="mAddress">Start address of the modified region</param>
  74. /// <param name="mSize">Size of the modified region</param>
  75. private void RegionModified(ulong mAddress, ulong mSize)
  76. {
  77. if (mAddress < Address)
  78. {
  79. mAddress = Address;
  80. }
  81. ulong maxSize = Address + Size - mAddress;
  82. if (mSize > maxSize)
  83. {
  84. mSize = maxSize;
  85. }
  86. InvalidateRangeImpl(mAddress, mSize);
  87. }
  88. protected abstract void InvalidateRangeImpl(ulong address, ulong size);
  89. protected abstract void Delete(T1 item);
  90. /// <summary>
  91. /// Performs the disposal of all resources stored on the pool.
  92. /// It's an error to try using the pool after disposal.
  93. /// </summary>
  94. public virtual void Dispose()
  95. {
  96. if (Items != null)
  97. {
  98. for (int index = 0; index < Items.Length; index++)
  99. {
  100. Delete(Items[index]);
  101. }
  102. Items = null;
  103. }
  104. _memoryTracking.Dispose();
  105. }
  106. }
  107. }