// KMemoryRegionManager.cs
  1. using Ryujinx.Common;
  2. using Ryujinx.HLE.HOS.Kernel.Common;
  3. namespace Ryujinx.HLE.HOS.Kernel.Memory
  4. {
  5. class KMemoryRegionManager
  6. {
  7. private static readonly int[] BlockOrders = new int[] { 12, 16, 21, 22, 25, 29, 30 };
  8. public ulong Address { get; private set; }
  9. public ulong EndAddr { get; private set; }
  10. public ulong Size { get; private set; }
  11. private int _blockOrdersCount;
  12. private KMemoryRegionBlock[] _blocks;
  13. public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
  14. {
  15. _blocks = new KMemoryRegionBlock[BlockOrders.Length];
  16. Address = address;
  17. Size = size;
  18. EndAddr = endAddr;
  19. _blockOrdersCount = BlockOrders.Length;
  20. for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
  21. {
  22. _blocks[blockIndex] = new KMemoryRegionBlock();
  23. _blocks[blockIndex].Order = BlockOrders[blockIndex];
  24. int nextOrder = blockIndex == _blockOrdersCount - 1 ? 0 : BlockOrders[blockIndex + 1];
  25. _blocks[blockIndex].NextOrder = nextOrder;
  26. int currBlockSize = 1 << BlockOrders[blockIndex];
  27. int nextBlockSize = currBlockSize;
  28. if (nextOrder != 0)
  29. {
  30. nextBlockSize = 1 << nextOrder;
  31. }
  32. ulong startAligned = BitUtils.AlignDown(address, nextBlockSize);
  33. ulong endAddrAligned = BitUtils.AlignDown(endAddr, currBlockSize);
  34. ulong sizeInBlocksTruncated = (endAddrAligned - startAligned) >> BlockOrders[blockIndex];
  35. ulong endAddrRounded = BitUtils.AlignUp(address + size, nextBlockSize);
  36. ulong sizeInBlocksRounded = (endAddrRounded - startAligned) >> BlockOrders[blockIndex];
  37. _blocks[blockIndex].StartAligned = startAligned;
  38. _blocks[blockIndex].SizeInBlocksTruncated = sizeInBlocksTruncated;
  39. _blocks[blockIndex].SizeInBlocksRounded = sizeInBlocksRounded;
  40. ulong currSizeInBlocks = sizeInBlocksRounded;
  41. int maxLevel = 0;
  42. do
  43. {
  44. maxLevel++;
  45. }
  46. while ((currSizeInBlocks /= 64) != 0);
  47. _blocks[blockIndex].MaxLevel = maxLevel;
  48. _blocks[blockIndex].Masks = new long[maxLevel][];
  49. currSizeInBlocks = sizeInBlocksRounded;
  50. for (int level = maxLevel - 1; level >= 0; level--)
  51. {
  52. currSizeInBlocks = (currSizeInBlocks + 63) / 64;
  53. _blocks[blockIndex].Masks[level] = new long[currSizeInBlocks];
  54. }
  55. }
  56. if (size != 0)
  57. {
  58. FreePages(address, size / KMemoryManager.PageSize);
  59. }
  60. }
  61. public KernelResult AllocatePages(ulong pagesCount, bool backwards, out KPageList pageList)
  62. {
  63. lock (_blocks)
  64. {
  65. return AllocatePagesImpl(pagesCount, backwards, out pageList);
  66. }
  67. }
  68. public ulong AllocatePagesContiguous(ulong pagesCount, bool backwards)
  69. {
  70. lock (_blocks)
  71. {
  72. return AllocatePagesContiguousImpl(pagesCount, backwards);
  73. }
  74. }
  75. private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
  76. {
  77. pageList = new KPageList();
  78. if (_blockOrdersCount > 0)
  79. {
  80. if (GetFreePagesImpl() < pagesCount)
  81. {
  82. return KernelResult.OutOfMemory;
  83. }
  84. }
  85. else if (pagesCount != 0)
  86. {
  87. return KernelResult.OutOfMemory;
  88. }
  89. for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
  90. {
  91. KMemoryRegionBlock block = _blocks[blockIndex];
  92. ulong bestFitBlockSize = 1UL << block.Order;
  93. ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize;
  94. // Check if this is the best fit for this page size.
  95. // If so, try allocating as much requested pages as possible.
  96. while (blockPagesCount <= pagesCount)
  97. {
  98. ulong address = AllocatePagesForOrder(blockIndex, backwards, bestFitBlockSize);
  99. // The address being zero means that no free space was found on that order,
  100. // just give up and try with the next one.
  101. if (address == 0)
  102. {
  103. break;
  104. }
  105. // Add new allocated page(s) to the pages list.
  106. // If an error occurs, then free all allocated pages and fail.
  107. KernelResult result = pageList.AddRange(address, blockPagesCount);
  108. if (result != KernelResult.Success)
  109. {
  110. FreePages(address, blockPagesCount);
  111. foreach (KPageNode pageNode in pageList)
  112. {
  113. FreePages(pageNode.Address, pageNode.PagesCount);
  114. }
  115. return result;
  116. }
  117. pagesCount -= blockPagesCount;
  118. }
  119. }
  120. // Success case, all requested pages were allocated successfully.
  121. if (pagesCount == 0)
  122. {
  123. return KernelResult.Success;
  124. }
  125. // Error case, free allocated pages and return out of memory.
  126. foreach (KPageNode pageNode in pageList)
  127. {
  128. FreePages(pageNode.Address, pageNode.PagesCount);
  129. }
  130. pageList = null;
  131. return KernelResult.OutOfMemory;
  132. }
  133. private ulong AllocatePagesContiguousImpl(ulong pagesCount, bool backwards)
  134. {
  135. if (pagesCount == 0 || _blocks.Length < 1)
  136. {
  137. return 0;
  138. }
  139. int blockIndex = 0;
  140. while ((1UL << _blocks[blockIndex].Order) / KMemoryManager.PageSize < pagesCount)
  141. {
  142. if (++blockIndex >= _blocks.Length)
  143. {
  144. return 0;
  145. }
  146. }
  147. ulong tightestFitBlockSize = 1UL << _blocks[blockIndex].Order;
  148. ulong address = AllocatePagesForOrder(blockIndex, backwards, tightestFitBlockSize);
  149. ulong requiredSize = pagesCount * KMemoryManager.PageSize;
  150. if (address != 0 && tightestFitBlockSize > requiredSize)
  151. {
  152. FreePages(address + requiredSize, (tightestFitBlockSize - requiredSize) / KMemoryManager.PageSize);
  153. }
  154. return address;
  155. }
  156. private ulong AllocatePagesForOrder(int blockIndex, bool backwards, ulong bestFitBlockSize)
  157. {
  158. ulong address = 0;
  159. KMemoryRegionBlock block = null;
  160. for (int currBlockIndex = blockIndex;
  161. currBlockIndex < _blockOrdersCount && address == 0;
  162. currBlockIndex++)
  163. {
  164. block = _blocks[currBlockIndex];
  165. int index = 0;
  166. bool zeroMask = false;
  167. for (int level = 0; level < block.MaxLevel; level++)
  168. {
  169. long mask = block.Masks[level][index];
  170. if (mask == 0)
  171. {
  172. zeroMask = true;
  173. break;
  174. }
  175. if (backwards)
  176. {
  177. index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
  178. }
  179. else
  180. {
  181. index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
  182. }
  183. }
  184. if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
  185. {
  186. continue;
  187. }
  188. block.FreeCount--;
  189. int tempIdx = index;
  190. for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
  191. {
  192. block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
  193. if (block.Masks[level][tempIdx / 64] != 0)
  194. {
  195. break;
  196. }
  197. }
  198. address = block.StartAligned + ((ulong)index << block.Order);
  199. }
  200. for (int currBlockIndex = blockIndex;
  201. currBlockIndex < _blockOrdersCount && address == 0;
  202. currBlockIndex++)
  203. {
  204. block = _blocks[currBlockIndex];
  205. int index = 0;
  206. bool zeroMask = false;
  207. for (int level = 0; level < block.MaxLevel; level++)
  208. {
  209. long mask = block.Masks[level][index];
  210. if (mask == 0)
  211. {
  212. zeroMask = true;
  213. break;
  214. }
  215. if (backwards)
  216. {
  217. index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
  218. }
  219. else
  220. {
  221. index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
  222. }
  223. }
  224. if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
  225. {
  226. continue;
  227. }
  228. block.FreeCount--;
  229. int tempIdx = index;
  230. for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
  231. {
  232. block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
  233. if (block.Masks[level][tempIdx / 64] != 0)
  234. {
  235. break;
  236. }
  237. }
  238. address = block.StartAligned + ((ulong)index << block.Order);
  239. }
  240. if (address != 0)
  241. {
  242. // If we are using a larger order than best fit, then we should
  243. // split it into smaller blocks.
  244. ulong firstFreeBlockSize = 1UL << block.Order;
  245. if (firstFreeBlockSize > bestFitBlockSize)
  246. {
  247. FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
  248. }
  249. }
  250. return address;
  251. }
  252. public void FreePage(ulong address)
  253. {
  254. lock (_blocks)
  255. {
  256. FreePages(address, 1);
  257. }
  258. }
  259. public void FreePages(KPageList pageList)
  260. {
  261. lock (_blocks)
  262. {
  263. foreach (KPageNode pageNode in pageList)
  264. {
  265. FreePages(pageNode.Address, pageNode.PagesCount);
  266. }
  267. }
  268. }
  269. private void FreePages(ulong address, ulong pagesCount)
  270. {
  271. ulong endAddr = address + pagesCount * KMemoryManager.PageSize;
  272. int blockIndex = _blockOrdersCount - 1;
  273. ulong addressRounded = 0;
  274. ulong endAddrTruncated = 0;
  275. for (; blockIndex >= 0; blockIndex--)
  276. {
  277. KMemoryRegionBlock allocInfo = _blocks[blockIndex];
  278. int blockSize = 1 << allocInfo.Order;
  279. addressRounded = BitUtils.AlignUp (address, blockSize);
  280. endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize);
  281. if (addressRounded < endAddrTruncated)
  282. {
  283. break;
  284. }
  285. }
  286. void FreeRegion(ulong currAddress)
  287. {
  288. for (int currBlockIndex = blockIndex;
  289. currBlockIndex < _blockOrdersCount && currAddress != 0;
  290. currBlockIndex++)
  291. {
  292. KMemoryRegionBlock block = _blocks[currBlockIndex];
  293. block.FreeCount++;
  294. ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order;
  295. int index = (int)freedBlocks;
  296. for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64)
  297. {
  298. long mask = block.Masks[level][index / 64];
  299. block.Masks[level][index / 64] = mask | (1L << (index & 63));
  300. if (mask != 0)
  301. {
  302. break;
  303. }
  304. }
  305. int blockSizeDelta = 1 << (block.NextOrder - block.Order);
  306. int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta);
  307. if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta))
  308. {
  309. break;
  310. }
  311. currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order);
  312. }
  313. }
  314. // Free inside aligned region.
  315. ulong baseAddress = addressRounded;
  316. while (baseAddress < endAddrTruncated)
  317. {
  318. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  319. FreeRegion(baseAddress);
  320. baseAddress += blockSize;
  321. }
  322. int nextBlockIndex = blockIndex - 1;
  323. // Free region between Address and aligned region start.
  324. baseAddress = addressRounded;
  325. for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
  326. {
  327. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  328. while (baseAddress - blockSize >= address)
  329. {
  330. baseAddress -= blockSize;
  331. FreeRegion(baseAddress);
  332. }
  333. }
  334. // Free region between aligned region end and End Address.
  335. baseAddress = endAddrTruncated;
  336. for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
  337. {
  338. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  339. while (baseAddress + blockSize <= endAddr)
  340. {
  341. FreeRegion(baseAddress);
  342. baseAddress += blockSize;
  343. }
  344. }
  345. }
  346. public ulong GetFreePages()
  347. {
  348. lock (_blocks)
  349. {
  350. return GetFreePagesImpl();
  351. }
  352. }
  353. private ulong GetFreePagesImpl()
  354. {
  355. ulong availablePages = 0;
  356. for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
  357. {
  358. KMemoryRegionBlock block = _blocks[blockIndex];
  359. ulong blockPagesCount = (1UL << block.Order) / KMemoryManager.PageSize;
  360. availablePages += blockPagesCount * block.FreeCount;
  361. }
  362. return availablePages;
  363. }
  364. }
  365. }