// KMemoryRegionManager.cs
  1. using Ryujinx.Common;
  2. namespace Ryujinx.HLE.HOS.Kernel
  3. {
  4. class KMemoryRegionManager
  5. {
  6. private static readonly int[] BlockOrders = new int[] { 12, 16, 21, 22, 25, 29, 30 };
  7. public ulong Address { get; }
  8. public ulong EndAddr { get; }
  9. public ulong Size { get; }
  10. private int _blockOrdersCount;
  11. private KMemoryRegionBlock[] _blocks;
  12. public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
  13. {
  14. _blocks = new KMemoryRegionBlock[BlockOrders.Length];
  15. Address = address;
  16. Size = size;
  17. EndAddr = endAddr;
  18. _blockOrdersCount = BlockOrders.Length;
  19. for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
  20. {
  21. _blocks[blockIndex] = new KMemoryRegionBlock();
  22. _blocks[blockIndex].Order = BlockOrders[blockIndex];
  23. int nextOrder = blockIndex == _blockOrdersCount - 1 ? 0 : BlockOrders[blockIndex + 1];
  24. _blocks[blockIndex].NextOrder = nextOrder;
  25. int currBlockSize = 1 << BlockOrders[blockIndex];
  26. int nextBlockSize = currBlockSize;
  27. if (nextOrder != 0)
  28. {
  29. nextBlockSize = 1 << nextOrder;
  30. }
  31. ulong startAligned = BitUtils.AlignDown(address, nextBlockSize);
  32. ulong endAddrAligned = BitUtils.AlignDown(endAddr, currBlockSize);
  33. ulong sizeInBlocksTruncated = (endAddrAligned - startAligned) >> BlockOrders[blockIndex];
  34. ulong endAddrRounded = BitUtils.AlignUp(address + size, nextBlockSize);
  35. ulong sizeInBlocksRounded = (endAddrRounded - startAligned) >> BlockOrders[blockIndex];
  36. _blocks[blockIndex].StartAligned = startAligned;
  37. _blocks[blockIndex].SizeInBlocksTruncated = sizeInBlocksTruncated;
  38. _blocks[blockIndex].SizeInBlocksRounded = sizeInBlocksRounded;
  39. ulong currSizeInBlocks = sizeInBlocksRounded;
  40. int maxLevel = 0;
  41. do
  42. {
  43. maxLevel++;
  44. }
  45. while ((currSizeInBlocks /= 64) != 0);
  46. _blocks[blockIndex].MaxLevel = maxLevel;
  47. _blocks[blockIndex].Masks = new long[maxLevel][];
  48. currSizeInBlocks = sizeInBlocksRounded;
  49. for (int level = maxLevel - 1; level >= 0; level--)
  50. {
  51. currSizeInBlocks = (currSizeInBlocks + 63) / 64;
  52. _blocks[blockIndex].Masks[level] = new long[currSizeInBlocks];
  53. }
  54. }
  55. if (size != 0)
  56. {
  57. FreePages(address, size / KMemoryManager.PageSize);
  58. }
  59. }
  60. public KernelResult AllocatePages(ulong pagesCount, bool backwards, out KPageList pageList)
  61. {
  62. lock (_blocks)
  63. {
  64. return AllocatePagesImpl(pagesCount, backwards, out pageList);
  65. }
  66. }
  67. private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
  68. {
  69. pageList = new KPageList();
  70. if (_blockOrdersCount > 0)
  71. {
  72. if (GetFreePagesImpl() < pagesCount)
  73. {
  74. return KernelResult.OutOfMemory;
  75. }
  76. }
  77. else if (pagesCount != 0)
  78. {
  79. return KernelResult.OutOfMemory;
  80. }
  81. for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
  82. {
  83. KMemoryRegionBlock block = _blocks[blockIndex];
  84. ulong bestFitBlockSize = 1UL << block.Order;
  85. ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize;
  86. //Check if this is the best fit for this page size.
  87. //If so, try allocating as much requested pages as possible.
  88. while (blockPagesCount <= pagesCount)
  89. {
  90. ulong address = 0;
  91. for (int currBlockIndex = blockIndex;
  92. currBlockIndex < _blockOrdersCount && address == 0;
  93. currBlockIndex++)
  94. {
  95. block = _blocks[currBlockIndex];
  96. int index = 0;
  97. bool zeroMask = false;
  98. for (int level = 0; level < block.MaxLevel; level++)
  99. {
  100. long mask = block.Masks[level][index];
  101. if (mask == 0)
  102. {
  103. zeroMask = true;
  104. break;
  105. }
  106. if (backwards)
  107. {
  108. index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
  109. }
  110. else
  111. {
  112. index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
  113. }
  114. }
  115. if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
  116. {
  117. continue;
  118. }
  119. block.FreeCount--;
  120. int tempIdx = index;
  121. for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
  122. {
  123. block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
  124. if (block.Masks[level][tempIdx / 64] != 0)
  125. {
  126. break;
  127. }
  128. }
  129. address = block.StartAligned + ((ulong)index << block.Order);
  130. }
  131. for (int currBlockIndex = blockIndex;
  132. currBlockIndex < _blockOrdersCount && address == 0;
  133. currBlockIndex++)
  134. {
  135. block = _blocks[currBlockIndex];
  136. int index = 0;
  137. bool zeroMask = false;
  138. for (int level = 0; level < block.MaxLevel; level++)
  139. {
  140. long mask = block.Masks[level][index];
  141. if (mask == 0)
  142. {
  143. zeroMask = true;
  144. break;
  145. }
  146. if (backwards)
  147. {
  148. index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
  149. }
  150. else
  151. {
  152. index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
  153. }
  154. }
  155. if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
  156. {
  157. continue;
  158. }
  159. block.FreeCount--;
  160. int tempIdx = index;
  161. for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
  162. {
  163. block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
  164. if (block.Masks[level][tempIdx / 64] != 0)
  165. {
  166. break;
  167. }
  168. }
  169. address = block.StartAligned + ((ulong)index << block.Order);
  170. }
  171. //The address being zero means that no free space was found on that order,
  172. //just give up and try with the next one.
  173. if (address == 0)
  174. {
  175. break;
  176. }
  177. //If we are using a larger order than best fit, then we should
  178. //split it into smaller blocks.
  179. ulong firstFreeBlockSize = 1UL << block.Order;
  180. if (firstFreeBlockSize > bestFitBlockSize)
  181. {
  182. FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
  183. }
  184. //Add new allocated page(s) to the pages list.
  185. //If an error occurs, then free all allocated pages and fail.
  186. KernelResult result = pageList.AddRange(address, blockPagesCount);
  187. if (result != KernelResult.Success)
  188. {
  189. FreePages(address, blockPagesCount);
  190. foreach (KPageNode pageNode in pageList)
  191. {
  192. FreePages(pageNode.Address, pageNode.PagesCount);
  193. }
  194. return result;
  195. }
  196. pagesCount -= blockPagesCount;
  197. }
  198. }
  199. //Success case, all requested pages were allocated successfully.
  200. if (pagesCount == 0)
  201. {
  202. return KernelResult.Success;
  203. }
  204. //Error case, free allocated pages and return out of memory.
  205. foreach (KPageNode pageNode in pageList)
  206. {
  207. FreePages(pageNode.Address, pageNode.PagesCount);
  208. }
  209. pageList = null;
  210. return KernelResult.OutOfMemory;
  211. }
  212. public void FreePages(KPageList pageList)
  213. {
  214. lock (_blocks)
  215. {
  216. foreach (KPageNode pageNode in pageList)
  217. {
  218. FreePages(pageNode.Address, pageNode.PagesCount);
  219. }
  220. }
  221. }
  222. private void FreePages(ulong address, ulong pagesCount)
  223. {
  224. ulong endAddr = address + pagesCount * KMemoryManager.PageSize;
  225. int blockIndex = _blockOrdersCount - 1;
  226. ulong addressRounded = 0;
  227. ulong endAddrTruncated = 0;
  228. for (; blockIndex >= 0; blockIndex--)
  229. {
  230. KMemoryRegionBlock allocInfo = _blocks[blockIndex];
  231. int blockSize = 1 << allocInfo.Order;
  232. addressRounded = BitUtils.AlignUp (address, blockSize);
  233. endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize);
  234. if (addressRounded < endAddrTruncated)
  235. {
  236. break;
  237. }
  238. }
  239. void FreeRegion(ulong currAddress)
  240. {
  241. for (int currBlockIndex = blockIndex;
  242. currBlockIndex < _blockOrdersCount && currAddress != 0;
  243. currBlockIndex++)
  244. {
  245. KMemoryRegionBlock block = _blocks[currBlockIndex];
  246. block.FreeCount++;
  247. ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order;
  248. int index = (int)freedBlocks;
  249. for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64)
  250. {
  251. long mask = block.Masks[level][index / 64];
  252. block.Masks[level][index / 64] = mask | (1L << (index & 63));
  253. if (mask != 0)
  254. {
  255. break;
  256. }
  257. }
  258. int blockSizeDelta = 1 << (block.NextOrder - block.Order);
  259. int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta);
  260. if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta))
  261. {
  262. break;
  263. }
  264. currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order);
  265. }
  266. }
  267. //Free inside aligned region.
  268. ulong baseAddress = addressRounded;
  269. while (baseAddress < endAddrTruncated)
  270. {
  271. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  272. FreeRegion(baseAddress);
  273. baseAddress += blockSize;
  274. }
  275. int nextBlockIndex = blockIndex - 1;
  276. //Free region between Address and aligned region start.
  277. baseAddress = addressRounded;
  278. for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
  279. {
  280. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  281. while (baseAddress - blockSize >= address)
  282. {
  283. baseAddress -= blockSize;
  284. FreeRegion(baseAddress);
  285. }
  286. }
  287. //Free region between aligned region end and End Address.
  288. baseAddress = endAddrTruncated;
  289. for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
  290. {
  291. ulong blockSize = 1UL << _blocks[blockIndex].Order;
  292. while (baseAddress + blockSize <= endAddr)
  293. {
  294. FreeRegion(baseAddress);
  295. baseAddress += blockSize;
  296. }
  297. }
  298. }
  299. public ulong GetFreePages()
  300. {
  301. lock (_blocks)
  302. {
  303. return GetFreePagesImpl();
  304. }
  305. }
  306. private ulong GetFreePagesImpl()
  307. {
  308. ulong availablePages = 0;
  309. for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
  310. {
  311. KMemoryRegionBlock block = _blocks[blockIndex];
  312. ulong blockPagesCount = (1UL << block.Order) / KMemoryManager.PageSize;
  313. availablePages += blockPagesCount * block.FreeCount;
  314. }
  315. return availablePages;
  316. }
  317. }
  318. }