// MemoryManager.cs
  1. using System;
  2. namespace Ryujinx.Graphics.Gpu.Memory
  3. {
  4. /// <summary>
  5. /// GPU memory manager.
  6. /// </summary>
  7. public class MemoryManager
  8. {
  9. private const ulong AddressSpaceSize = 1UL << 40;
  10. public const ulong BadAddress = ulong.MaxValue;
  11. private const int PtLvl0Bits = 14;
  12. private const int PtLvl1Bits = 14;
  13. public const int PtPageBits = 12;
  14. private const ulong PtLvl0Size = 1UL << PtLvl0Bits;
  15. private const ulong PtLvl1Size = 1UL << PtLvl1Bits;
  16. public const ulong PageSize = 1UL << PtPageBits;
  17. private const ulong PtLvl0Mask = PtLvl0Size - 1;
  18. private const ulong PtLvl1Mask = PtLvl1Size - 1;
  19. public const ulong PageMask = PageSize - 1;
  20. private const int PtLvl0Bit = PtPageBits + PtLvl1Bits;
  21. private const int PtLvl1Bit = PtPageBits;
  22. private const ulong PteUnmapped = 0xffffffff_ffffffff;
  23. private const ulong PteReserved = 0xffffffff_fffffffe;
  24. private readonly ulong[][] _pageTable;
  25. public event EventHandler<UnmapEventArgs> MemoryUnmapped;
  26. /// <summary>
  27. /// Creates a new instance of the GPU memory manager.
  28. /// </summary>
  29. public MemoryManager()
  30. {
  31. _pageTable = new ulong[PtLvl0Size][];
  32. }
  33. /// <summary>
  34. /// Maps a given range of pages to the specified CPU virtual address.
  35. /// </summary>
  36. /// <remarks>
  37. /// All addresses and sizes must be page aligned.
  38. /// </remarks>
  39. /// <param name="pa">CPU virtual address to map into</param>
  40. /// <param name="va">GPU virtual address to be mapped</param>
  41. /// <param name="size">Size in bytes of the mapping</param>
  42. /// <returns>GPU virtual address of the mapping</returns>
  43. public ulong Map(ulong pa, ulong va, ulong size)
  44. {
  45. lock (_pageTable)
  46. {
  47. for (ulong offset = 0; offset < size; offset += PageSize)
  48. {
  49. SetPte(va + offset, pa + offset);
  50. }
  51. }
  52. return va;
  53. }
  54. /// <summary>
  55. /// Maps a given range of pages to an allocated GPU virtual address.
  56. /// The memory is automatically allocated by the memory manager.
  57. /// </summary>
  58. /// <param name="pa">CPU virtual address to map into</param>
  59. /// <param name="size">Size in bytes of the mapping</param>
  60. /// <param name="alignment">Required alignment of the GPU virtual address in bytes</param>
  61. /// <returns>GPU virtual address where the range was mapped, or an all ones mask in case of failure</returns>
  62. public ulong MapAllocate(ulong pa, ulong size, ulong alignment)
  63. {
  64. lock (_pageTable)
  65. {
  66. ulong va = GetFreePosition(size, alignment);
  67. if (va != PteUnmapped)
  68. {
  69. for (ulong offset = 0; offset < size; offset += PageSize)
  70. {
  71. SetPte(va + offset, pa + offset);
  72. }
  73. }
  74. return va;
  75. }
  76. }
  77. /// <summary>
  78. /// Maps a given range of pages to an allocated GPU virtual address.
  79. /// The memory is automatically allocated by the memory manager.
  80. /// This also ensures that the mapping is always done in the first 4GB of GPU address space.
  81. /// </summary>
  82. /// <param name="pa">CPU virtual address to map into</param>
  83. /// <param name="size">Size in bytes of the mapping</param>
  84. /// <returns>GPU virtual address where the range was mapped, or an all ones mask in case of failure</returns>
  85. public ulong MapLow(ulong pa, ulong size)
  86. {
  87. lock (_pageTable)
  88. {
  89. ulong va = GetFreePosition(size, 1, PageSize);
  90. if (va != PteUnmapped && va <= uint.MaxValue && (va + size) <= uint.MaxValue)
  91. {
  92. for (ulong offset = 0; offset < size; offset += PageSize)
  93. {
  94. SetPte(va + offset, pa + offset);
  95. }
  96. }
  97. else
  98. {
  99. va = PteUnmapped;
  100. }
  101. return va;
  102. }
  103. }
  104. /// <summary>
  105. /// Reserves memory at a fixed GPU memory location.
  106. /// This prevents the reserved region from being used for memory allocation for map.
  107. /// </summary>
  108. /// <param name="va">GPU virtual address to reserve</param>
  109. /// <param name="size">Size in bytes of the reservation</param>
  110. /// <returns>GPU virtual address of the reservation, or an all ones mask in case of failure</returns>
  111. public ulong ReserveFixed(ulong va, ulong size)
  112. {
  113. lock (_pageTable)
  114. {
  115. for (ulong offset = 0; offset < size; offset += PageSize)
  116. {
  117. if (IsPageInUse(va + offset))
  118. {
  119. return PteUnmapped;
  120. }
  121. }
  122. for (ulong offset = 0; offset < size; offset += PageSize)
  123. {
  124. SetPte(va + offset, PteReserved);
  125. }
  126. }
  127. return va;
  128. }
  129. /// <summary>
  130. /// Reserves memory at any GPU memory location.
  131. /// </summary>
  132. /// <param name="size">Size in bytes of the reservation</param>
  133. /// <param name="alignment">Reservation address alignment in bytes</param>
  134. /// <returns>GPU virtual address of the reservation, or an all ones mask in case of failure</returns>
  135. public ulong Reserve(ulong size, ulong alignment)
  136. {
  137. lock (_pageTable)
  138. {
  139. ulong address = GetFreePosition(size, alignment);
  140. if (address != PteUnmapped)
  141. {
  142. for (ulong offset = 0; offset < size; offset += PageSize)
  143. {
  144. SetPte(address + offset, PteReserved);
  145. }
  146. }
  147. return address;
  148. }
  149. }
  150. /// <summary>
  151. /// Frees memory that was previously allocated by a map or reserved.
  152. /// </summary>
  153. /// <param name="va">GPU virtual address to free</param>
  154. /// <param name="size">Size in bytes of the region being freed</param>
  155. public void Free(ulong va, ulong size)
  156. {
  157. lock (_pageTable)
  158. {
  159. for (ulong offset = 0; offset < size; offset += PageSize)
  160. {
  161. SetPte(va + offset, PteUnmapped);
  162. }
  163. // Event handlers are not expected to be thread safe.
  164. MemoryUnmapped?.Invoke(this, new UnmapEventArgs(va, size));
  165. }
  166. }
  167. /// <summary>
  168. /// Gets the address of an unused (free) region of the specified size.
  169. /// </summary>
  170. /// <param name="size">Size of the region in bytes</param>
  171. /// <param name="alignment">Required alignment of the region address in bytes</param>
  172. /// <param name="start">Start address of the search on the address space</param>
  173. /// <returns>GPU virtual address of the allocation, or an all ones mask in case of failure</returns>
  174. private ulong GetFreePosition(ulong size, ulong alignment = 1, ulong start = 1UL << 32)
  175. {
  176. // Note: Address 0 is not considered valid by the driver,
  177. // when 0 is returned it's considered a mapping error.
  178. ulong address = start;
  179. ulong freeSize = 0;
  180. if (alignment == 0)
  181. {
  182. alignment = 1;
  183. }
  184. alignment = (alignment + PageMask) & ~PageMask;
  185. while (address + freeSize < AddressSpaceSize)
  186. {
  187. if (!IsPageInUse(address + freeSize))
  188. {
  189. freeSize += PageSize;
  190. if (freeSize >= size)
  191. {
  192. return address;
  193. }
  194. }
  195. else
  196. {
  197. address += freeSize + PageSize;
  198. freeSize = 0;
  199. ulong remainder = address % alignment;
  200. if (remainder != 0)
  201. {
  202. address = (address - remainder) + alignment;
  203. }
  204. }
  205. }
  206. return PteUnmapped;
  207. }
  208. /// <summary>
  209. /// Gets the number of mapped or reserved pages on a given region.
  210. /// </summary>
  211. /// <param name="gpuVa">Start GPU virtual address of the region</param>
  212. /// <param name="maxSize">Maximum size of the data</param>
  213. /// <returns>Mapped size in bytes of the specified region</returns>
  214. internal ulong GetSubSize(ulong gpuVa, ulong maxSize)
  215. {
  216. ulong size = 0;
  217. while (GetPte(gpuVa + size) != PteUnmapped)
  218. {
  219. size += PageSize;
  220. if (size >= maxSize)
  221. {
  222. return maxSize;
  223. }
  224. }
  225. return size;
  226. }
  227. /// <summary>
  228. /// Translates a GPU virtual address to a CPU virtual address.
  229. /// </summary>
  230. /// <param name="gpuVa">GPU virtual address to be translated</param>
  231. /// <returns>CPU virtual address</returns>
  232. public ulong Translate(ulong gpuVa)
  233. {
  234. ulong baseAddress = GetPte(gpuVa);
  235. if (baseAddress == PteUnmapped || baseAddress == PteReserved)
  236. {
  237. return PteUnmapped;
  238. }
  239. return baseAddress + (gpuVa & PageMask);
  240. }
  241. /// <summary>
  242. /// Checks if a given memory region is currently unmapped.
  243. /// </summary>
  244. /// <param name="gpuVa">Start GPU virtual address of the region</param>
  245. /// <param name="size">Size in bytes of the region</param>
  246. /// <returns>True if the region is unmapped (free), false otherwise</returns>
  247. public bool IsRegionFree(ulong gpuVa, ulong size)
  248. {
  249. for (ulong offset = 0; offset < size; offset += PageSize)
  250. {
  251. if (IsPageInUse(gpuVa + offset))
  252. {
  253. return false;
  254. }
  255. }
  256. return true;
  257. }
  258. /// <summary>
  259. /// Checks if a given memory page is mapped or reserved.
  260. /// </summary>
  261. /// <param name="gpuVa">GPU virtual address of the page</param>
  262. /// <returns>True if the page is mapped or reserved, false otherwise</returns>
  263. private bool IsPageInUse(ulong gpuVa)
  264. {
  265. if (gpuVa >> PtLvl0Bits + PtLvl1Bits + PtPageBits != 0)
  266. {
  267. return false;
  268. }
  269. ulong l0 = (gpuVa >> PtLvl0Bit) & PtLvl0Mask;
  270. ulong l1 = (gpuVa >> PtLvl1Bit) & PtLvl1Mask;
  271. if (_pageTable[l0] == null)
  272. {
  273. return false;
  274. }
  275. return _pageTable[l0][l1] != PteUnmapped;
  276. }
  277. /// <summary>
  278. /// Gets the Page Table entry for a given GPU virtual address.
  279. /// </summary>
  280. /// <param name="gpuVa">GPU virtual address</param>
  281. /// <returns>Page table entry (CPU virtual address)</returns>
  282. private ulong GetPte(ulong gpuVa)
  283. {
  284. ulong l0 = (gpuVa >> PtLvl0Bit) & PtLvl0Mask;
  285. ulong l1 = (gpuVa >> PtLvl1Bit) & PtLvl1Mask;
  286. if (_pageTable[l0] == null)
  287. {
  288. return PteUnmapped;
  289. }
  290. return _pageTable[l0][l1];
  291. }
  292. /// <summary>
  293. /// Sets a Page Table entry at a given GPU virtual address.
  294. /// </summary>
  295. /// <param name="gpuVa">GPU virtual address</param>
  296. /// <param name="pte">Page table entry (CPU virtual address)</param>
  297. private void SetPte(ulong gpuVa, ulong pte)
  298. {
  299. ulong l0 = (gpuVa >> PtLvl0Bit) & PtLvl0Mask;
  300. ulong l1 = (gpuVa >> PtLvl1Bit) & PtLvl1Mask;
  301. if (_pageTable[l0] == null)
  302. {
  303. _pageTable[l0] = new ulong[PtLvl1Size];
  304. for (ulong index = 0; index < PtLvl1Size; index++)
  305. {
  306. _pageTable[l0][index] = PteUnmapped;
  307. }
  308. }
  309. _pageTable[l0][l1] = pte;
  310. }
  311. }
  312. }