// KMemoryRegionManager.cs
  1. using Ryujinx.Common;
  2. using Ryujinx.HLE.HOS.Kernel.Common;
  3. namespace Ryujinx.HLE.HOS.Kernel.Memory
  4. {
  5. class KMemoryRegionManager
  6. {
  7. private static readonly int[] BlockOrders = new int[] { 12, 16, 21, 22, 25, 29, 30 };
  8. public ulong Address { get; private set; }
  9. public ulong EndAddr { get; private set; }
  10. public ulong Size { get; private set; }
  11. private int _blockOrdersCount;
  12. private KMemoryRegionBlock[] _blocks;
  13. public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
  14. {
  15. _blocks = new KMemoryRegionBlock[BlockOrders.Length];
  16. Address = address;
  17. Size = size;
  18. EndAddr = endAddr;
  19. _blockOrdersCount = BlockOrders.Length;
  20. for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
  21. {
  22. _blocks[blockIndex] = new KMemoryRegionBlock();
  23. _blocks[blockIndex].Order = BlockOrders[blockIndex];
  24. int nextOrder = blockIndex == _blockOrdersCount - 1 ? 0 : BlockOrders[blockIndex + 1];
  25. _blocks[blockIndex].NextOrder = nextOrder;
  26. int currBlockSize = 1 << BlockOrders[blockIndex];
  27. int nextBlockSize = currBlockSize;
  28. if (nextOrder != 0)
  29. {
  30. nextBlockSize = 1 << nextOrder;
  31. }
  32. ulong startAligned = BitUtils.AlignDown(address, nextBlockSize);
  33. ulong endAddrAligned = BitUtils.AlignDown(endAddr, currBlockSize);
  34. ulong sizeInBlocksTruncated = (endAddrAligned - startAligned) >> BlockOrders[blockIndex];
  35. ulong endAddrRounded = BitUtils.AlignUp(address + size, nextBlockSize);
  36. ulong sizeInBlocksRounded = (endAddrRounded - startAligned) >> BlockOrders[blockIndex];
  37. _blocks[blockIndex].StartAligned = startAligned;
  38. _blocks[blockIndex].SizeInBlocksTruncated = sizeInBlocksTruncated;
  39. _blocks[blockIndex].SizeInBlocksRounded = sizeInBlocksRounded;
  40. ulong currSizeInBlocks = sizeInBlocksRounded;
  41. int maxLevel = 0;
  42. do
  43. {
  44. maxLevel++;
  45. }
  46. while ((currSizeInBlocks /= 64) != 0);
  47. _blocks[blockIndex].MaxLevel = maxLevel;
  48. _blocks[blockIndex].Masks = new long[maxLevel][];
  49. currSizeInBlocks = sizeInBlocksRounded;
  50. for (int level = maxLevel - 1; level >= 0; level--)
  51. {
  52. currSizeInBlocks = (currSizeInBlocks + 63) / 64;
  53. _blocks[blockIndex].Masks[level] = new long[currSizeInBlocks];
  54. }
  55. }
  56. if (size != 0)
  57. {
  58. FreePages(address, size / KMemoryManager.PageSize);
  59. }
  60. }
  61. public KernelResult AllocatePages(ulong pagesCount, bool backwards, out KPageList pageList)
  62. {
  63. lock (_blocks)
  64. {
  65. return AllocatePagesImpl(pagesCount, backwards, out pageList);
  66. }
  67. }
  68. private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
  69. {
  70. pageList = new KPageList();
  71. if (_blockOrdersCount > 0)
  72. {
  73. if (GetFreePagesImpl() < pagesCount)
  74. {
  75. return KernelResult.OutOfMemory;
  76. }
  77. }
  78. else if (pagesCount != 0)
  79. {
  80. return KernelResult.OutOfMemory;
  81. }
  82. for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
  83. {
  84. KMemoryRegionBlock block = _blocks[blockIndex];
  85. ulong bestFitBlockSize = 1UL << block.Order;
  86. ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize;
  87. //Check if this is the best fit for this page size.
  88. //If so, try allocating as much requested pages as possible.
  89. while (blockPagesCount <= pagesCount)
  90. {
  91. ulong address = 0;
  92. for (int currBlockIndex = blockIndex;
  93. currBlockIndex < _blockOrdersCount && address == 0;
  94. currBlockIndex++)
  95. {
  96. block = _blocks[currBlockIndex];
  97. int index = 0;
  98. bool zeroMask = false;
  99. for (int level = 0; level < block.MaxLevel; level++)
  100. {
  101. long mask = block.Masks[level][index];
  102. if (mask == 0)
  103. {
  104. zeroMask = true;
  105. break;
  106. }
  107. if (backwards)
  108. {
  109. index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
  110. }
  111. else
  112. {
  113. index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
  114. }
  115. }
  116. if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
  117. {
  118. continue;
  119. }
  120. block.FreeCount--;
  121. int tempIdx = index;
  122. for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
  123. {
  124. block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
  125. if (block.Masks[level][tempIdx / 64] != 0)
  126. {
  127. break;
  128. }
  129. }
  130. address = block.StartAligned + ((ulong)index << block.Order);
  131. }
  132. for (int currBlockIndex = blockIndex;
  133. currBlockIndex < _blockOrdersCount && address == 0;
  134. currBlockIndex++)
  135. {
  136. block = _blocks[currBlockIndex];
  137. int index = 0;
  138. bool zeroMask = false;
  139. for (int level = 0; level < block.MaxLevel; level++)
  140. {
  141. long mask = block.Masks[level][index];
  142. if (mask == 0)
  143. {
  144. zeroMask = true;
  145. break;
  146. }
  147. if (backwards)
  148. {
  149. index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
  150. }
  151. else
  152. {
  153. index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
  154. }
  155. }
  156. if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
  157. {
  158. continue;
  159. }
  160. block.FreeCount--;
  161. int tempIdx = index;
  162. for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
  163. {
  164. block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
  165. if (block.Masks[level][tempIdx / 64] != 0)
  166. {
  167. break;
  168. }
  169. }
  170. address = block.StartAligned + ((ulong)index << block.Order);
  171. }
  172. //The address being zero means that no free space was found on that order,
  173. //just give up and try with the next one.
  174. if (address == 0)
  175. {
  176. break;
  177. }
  178. //If we are using a larger order than best fit, then we should
  179. //split it into smaller blocks.
  180. ulong firstFreeBlockSize = 1UL << block.Order;
  181. if (firstFreeBlockSize > bestFitBlockSize)
  182. {
  183. FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
  184. }
  185. //Add new allocated page(s) to the pages list.
  186. //If an error occurs, then free all allocated pages and fail.
  187. KernelResult result = pageList.AddRange(address, blockPagesCount);
  188. if (result != KernelResult.Success)
  189. {
  190. FreePages(address, blockPagesCount);
  191. foreach (KPageNode pageNode in pageList)
  192. {
  193. FreePages(pageNode.Address, pageNode.PagesCount);
  194. }
  195. return result;
  196. }
  197. pagesCount -= blockPagesCount;
  198. }
  199. }
  200. //Success case, all requested pages were allocated successfully.
  201. if (pagesCount == 0)
  202. {
  203. return KernelResult.Success;
  204. }
  205. //Error case, free allocated pages and return out of memory.
  206. foreach (KPageNode pageNode in pageList)
  207. {
  208. FreePages(pageNode.Address, pageNode.PagesCount);
  209. }
  210. pageList = null;
  211. return KernelResult.OutOfMemory;
  212. }
  213. public void FreePages(KPageList pageList)
  214. {
  215. lock (_blocks)
  216. {
  217. foreach (KPageNode pageNode in pageList)
  218. {
  219. FreePages(pageNode.Address, pageNode.PagesCount);
  220. }
  221. }
  222. }
  223. private void FreePages(ulong address, ulong pagesCount)
  224. {
  225. ulong endAddr = address + pagesCount * KMemoryManager.PageSize;
  226. int blockIndex = _blockOrdersCount - 1;
  227. ulong addressRounded = 0;
  228. ulong endAddrTruncated = 0;
  229. for (; blockIndex >= 0; blockIndex--)
  230. {
  231. KMemoryRegionBlock allocInfo = _blocks[blockIndex];
  232. int blockSize = 1 << allocInfo.Order;
  233. addressRounded = BitUtils.AlignUp (address, blockSize);
  234. endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize);
  235. if (addressRounded < endAddrTruncated)
  236. {
  237. break;
  238. }
  239. }
  240. void FreeRegion(ulong currAddress)
  241. {
  242. for (int currBlockIndex = blockIndex;
  243. currBlockIndex < _blockOrdersCount && currAddress != 0;
  244. currBlockIndex++)
  245. {
  246. KMemoryRegionBlock block = _blocks[currBlockIndex];
  247. block.FreeCount++;
  248. ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order;
  249. int index = (int)freedBlocks;
  250. for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64)
  251. {
  252. long mask = block.Masks[level][index / 64];
  253. block.Masks[level][index / 64] = mask | (1L << (index & 63));
  254. if (mask != 0)
  255. {
  256. break;
  257. }
  258. }
  259. int blockSizeDelta = 1 << (block.NextOrder - block.Order);
  260. int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta);
  261. if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta))
  262. {
  263. break;
  264. }
  265. currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order);
  266. }
  267. }
  268. //Free inside aligned region.
  269. ulong baseAddress = addressRounded;
  270. while (baseAddress < endAddrTruncated)
  271. {
  272. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  273. FreeRegion(baseAddress);
  274. baseAddress += blockSize;
  275. }
  276. int nextBlockIndex = blockIndex - 1;
  277. //Free region between Address and aligned region start.
  278. baseAddress = addressRounded;
  279. for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
  280. {
  281. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  282. while (baseAddress - blockSize >= address)
  283. {
  284. baseAddress -= blockSize;
  285. FreeRegion(baseAddress);
  286. }
  287. }
  288. //Free region between aligned region end and End Address.
  289. baseAddress = endAddrTruncated;
  290. for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
  291. {
  292. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  293. while (baseAddress + blockSize <= endAddr)
  294. {
  295. FreeRegion(baseAddress);
  296. baseAddress += blockSize;
  297. }
  298. }
  299. }
  300. public ulong GetFreePages()
  301. {
  302. lock (_blocks)
  303. {
  304. return GetFreePagesImpl();
  305. }
  306. }
  307. private ulong GetFreePagesImpl()
  308. {
  309. ulong availablePages = 0;
  310. for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
  311. {
  312. KMemoryRegionBlock block = _blocks[blockIndex];
  313. ulong blockPagesCount = (1UL << block.Order) / KMemoryManager.PageSize;
  314. availablePages += blockPagesCount * block.FreeCount;
  315. }
  316. return availablePages;
  317. }
  318. }
  319. }