KMemoryRegionManager.cs 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559
  1. using Ryujinx.Common;
  2. using Ryujinx.HLE.HOS.Kernel.Common;
  3. using System.Diagnostics;
  4. namespace Ryujinx.HLE.HOS.Kernel.Memory
  5. {
  6. class KMemoryRegionManager
  7. {
        // log2 of the block size in bytes for each buddy-allocator order,
        // smallest (2^12 = one 4 KB page) to largest (2^30).
        private static readonly int[] BlockOrders = new int[] { 12, 16, 21, 22, 25, 29, 30 };

        // Base address of the managed memory region.
        public ulong Address { get; private set; }

        // End address of the managed memory region (exclusive).
        public ulong EndAddr { get; private set; }

        // Size of the managed memory region in bytes.
        public ulong Size { get; private set; }

        // Number of orders in use; set to BlockOrders.Length in the constructor.
        private int _blockOrdersCount;

        // One block (free-bitmask tree + free count) per order. Also used as the
        // lock object guarding all allocator state.
        private readonly KMemoryRegionBlock[] _blocks;

        // Per-page reference counts; pages are returned to the free lists when
        // their count drops to zero (see DecrementPagesReferenceCount).
        private readonly ushort[] _pageReferenceCounts;
        /// <summary>
        /// Creates a new manager covering the region starting at <paramref name="address"/>,
        /// builds the per-order free-bitmask trees and marks the whole region free.
        /// </summary>
        /// <param name="address">Base address of the region</param>
        /// <param name="size">Size of the region in bytes</param>
        /// <param name="endAddr">End address of the region (presumably address + size — TODO confirm against callers)</param>
        public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
        {
            _blocks = new KMemoryRegionBlock[BlockOrders.Length];

            Address = address;
            Size = size;
            EndAddr = endAddr;

            _blockOrdersCount = BlockOrders.Length;

            for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
            {
                _blocks[blockIndex] = new KMemoryRegionBlock();

                _blocks[blockIndex].Order = BlockOrders[blockIndex];

                // The next (larger) order, or 0 on the largest order.
                int nextOrder = blockIndex == _blockOrdersCount - 1 ? 0 : BlockOrders[blockIndex + 1];

                _blocks[blockIndex].NextOrder = nextOrder;

                int currBlockSize = 1 << BlockOrders[blockIndex];
                int nextBlockSize = currBlockSize;

                if (nextOrder != 0)
                {
                    nextBlockSize = 1 << nextOrder;
                }

                // The bitmap origin is aligned down to the *next* order's block size
                // so that groups of buddies always share a parent entry and can be
                // coalesced upwards.
                ulong startAligned = BitUtils.AlignDown(address, nextBlockSize);
                ulong endAddrAligned = BitUtils.AlignDown(endAddr, currBlockSize);

                // Count of whole blocks of this order that fit inside the region.
                ulong sizeInBlocksTruncated = (endAddrAligned - startAligned) >> BlockOrders[blockIndex];

                ulong endAddrRounded = BitUtils.AlignUp(address + size, nextBlockSize);

                // Count including the partially covered blocks at both edges;
                // this is the capacity the bitmasks must be sized for.
                ulong sizeInBlocksRounded = (endAddrRounded - startAligned) >> BlockOrders[blockIndex];

                _blocks[blockIndex].StartAligned = startAligned;
                _blocks[blockIndex].SizeInBlocksTruncated = sizeInBlocksTruncated;
                _blocks[blockIndex].SizeInBlocksRounded = sizeInBlocksRounded;

                // Number of levels in the bitmask tree: each level up tracks 64
                // entries of the level below it.
                ulong currSizeInBlocks = sizeInBlocksRounded;

                int maxLevel = 0;

                do
                {
                    maxLevel++;
                }
                while ((currSizeInBlocks /= 64) != 0);

                _blocks[blockIndex].MaxLevel = maxLevel;

                _blocks[blockIndex].Masks = new long[maxLevel][];

                currSizeInBlocks = sizeInBlocksRounded;

                // Allocate the mask arrays from the deepest (one bit per block)
                // level upwards, dividing by 64 (rounded up) at each step.
                for (int level = maxLevel - 1; level >= 0; level--)
                {
                    currSizeInBlocks = (currSizeInBlocks + 63) / 64;

                    _blocks[blockIndex].Masks[level] = new long[currSizeInBlocks];
                }
            }

            _pageReferenceCounts = new ushort[size / KPageTableBase.PageSize];

            // Mark the entire region as free initially.
            if (size != 0)
            {
                FreePages(address, size / KPageTableBase.PageSize);
            }
        }
  64. public KernelResult AllocatePages(ulong pagesCount, bool backwards, out KPageList pageList)
  65. {
  66. lock (_blocks)
  67. {
  68. KernelResult result = AllocatePagesImpl(pagesCount, backwards, out pageList);
  69. if (result == KernelResult.Success)
  70. {
  71. foreach (var node in pageList)
  72. {
  73. IncrementPagesReferenceCount(node.Address, node.PagesCount);
  74. }
  75. }
  76. return result;
  77. }
  78. }
  79. public ulong AllocatePagesContiguous(KernelContext context, ulong pagesCount, bool backwards)
  80. {
  81. lock (_blocks)
  82. {
  83. ulong address = AllocatePagesContiguousImpl(pagesCount, backwards);
  84. if (address != 0)
  85. {
  86. IncrementPagesReferenceCount(address, pagesCount);
  87. context.Memory.Commit(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
  88. }
  89. return address;
  90. }
  91. }
        /// <summary>
        /// Allocates pages greedily, trying the largest order that still fits the
        /// remaining request before falling back to smaller orders.
        /// Caller must hold the _blocks lock.
        /// </summary>
        /// <param name="pagesCount">Number of pages to allocate</param>
        /// <param name="backwards">True to prefer allocating from the end of the region</param>
        /// <param name="pageList">Receives the allocated ranges; null on failure</param>
        /// <returns>Success, or OutOfMemory if the request cannot be fully satisfied</returns>
        private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
        {
            pageList = new KPageList();

            if (_blockOrdersCount > 0)
            {
                // Fast reject: not enough free pages in total.
                if (GetFreePagesImpl() < pagesCount)
                {
                    return KernelResult.OutOfMemory;
                }
            }
            else if (pagesCount != 0)
            {
                // No orders configured: only a zero-page request can succeed.
                return KernelResult.OutOfMemory;
            }

            // Walk orders from largest to smallest.
            for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
            {
                KMemoryRegionBlock block = _blocks[blockIndex];

                ulong bestFitBlockSize = 1UL << block.Order;

                ulong blockPagesCount = bestFitBlockSize / KPageTableBase.PageSize;

                // Check if this is the best fit for this page size.
                // If so, try allocating as much requested pages as possible.
                while (blockPagesCount <= pagesCount)
                {
                    ulong address = AllocatePagesForOrder(blockIndex, backwards, bestFitBlockSize);

                    // The address being zero means that no free space was found on that order,
                    // just give up and try with the next one.
                    if (address == 0)
                    {
                        break;
                    }

                    // Add new allocated page(s) to the pages list.
                    // If an error occurs, then free all allocated pages and fail.
                    KernelResult result = pageList.AddRange(address, blockPagesCount);

                    if (result != KernelResult.Success)
                    {
                        FreePages(address, blockPagesCount);

                        foreach (KPageNode pageNode in pageList)
                        {
                            FreePages(pageNode.Address, pageNode.PagesCount);
                        }

                        return result;
                    }

                    pagesCount -= blockPagesCount;
                }
            }

            // Success case, all requested pages were allocated successfully.
            if (pagesCount == 0)
            {
                return KernelResult.Success;
            }

            // Error case, free allocated pages and return out of memory.
            foreach (KPageNode pageNode in pageList)
            {
                FreePages(pageNode.Address, pageNode.PagesCount);
            }

            pageList = null;

            return KernelResult.OutOfMemory;
        }
  150. private ulong AllocatePagesContiguousImpl(ulong pagesCount, bool backwards)
  151. {
  152. if (pagesCount == 0 || _blocks.Length < 1)
  153. {
  154. return 0;
  155. }
  156. int blockIndex = 0;
  157. while ((1UL << _blocks[blockIndex].Order) / KPageTableBase.PageSize < pagesCount)
  158. {
  159. if (++blockIndex >= _blocks.Length)
  160. {
  161. return 0;
  162. }
  163. }
  164. ulong tightestFitBlockSize = 1UL << _blocks[blockIndex].Order;
  165. ulong address = AllocatePagesForOrder(blockIndex, backwards, tightestFitBlockSize);
  166. ulong requiredSize = pagesCount * KPageTableBase.PageSize;
  167. if (address != 0 && tightestFitBlockSize > requiredSize)
  168. {
  169. FreePages(address + requiredSize, (tightestFitBlockSize - requiredSize) / KPageTableBase.PageSize);
  170. }
  171. return address;
  172. }
  173. private ulong AllocatePagesForOrder(int blockIndex, bool backwards, ulong bestFitBlockSize)
  174. {
  175. ulong address = 0;
  176. KMemoryRegionBlock block = null;
  177. for (int currBlockIndex = blockIndex;
  178. currBlockIndex < _blockOrdersCount && address == 0;
  179. currBlockIndex++)
  180. {
  181. block = _blocks[currBlockIndex];
  182. int index = 0;
  183. bool zeroMask = false;
  184. for (int level = 0; level < block.MaxLevel; level++)
  185. {
  186. long mask = block.Masks[level][index];
  187. if (mask == 0)
  188. {
  189. zeroMask = true;
  190. break;
  191. }
  192. if (backwards)
  193. {
  194. index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
  195. }
  196. else
  197. {
  198. index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
  199. }
  200. }
  201. if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
  202. {
  203. continue;
  204. }
  205. block.FreeCount--;
  206. int tempIdx = index;
  207. for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
  208. {
  209. block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
  210. if (block.Masks[level][tempIdx / 64] != 0)
  211. {
  212. break;
  213. }
  214. }
  215. address = block.StartAligned + ((ulong)index << block.Order);
  216. }
  217. for (int currBlockIndex = blockIndex;
  218. currBlockIndex < _blockOrdersCount && address == 0;
  219. currBlockIndex++)
  220. {
  221. block = _blocks[currBlockIndex];
  222. int index = 0;
  223. bool zeroMask = false;
  224. for (int level = 0; level < block.MaxLevel; level++)
  225. {
  226. long mask = block.Masks[level][index];
  227. if (mask == 0)
  228. {
  229. zeroMask = true;
  230. break;
  231. }
  232. if (backwards)
  233. {
  234. index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
  235. }
  236. else
  237. {
  238. index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
  239. }
  240. }
  241. if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
  242. {
  243. continue;
  244. }
  245. block.FreeCount--;
  246. int tempIdx = index;
  247. for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
  248. {
  249. block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));
  250. if (block.Masks[level][tempIdx / 64] != 0)
  251. {
  252. break;
  253. }
  254. }
  255. address = block.StartAligned + ((ulong)index << block.Order);
  256. }
  257. if (address != 0)
  258. {
  259. // If we are using a larger order than best fit, then we should
  260. // split it into smaller blocks.
  261. ulong firstFreeBlockSize = 1UL << block.Order;
  262. if (firstFreeBlockSize > bestFitBlockSize)
  263. {
  264. FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KPageTableBase.PageSize);
  265. }
  266. }
  267. return address;
  268. }
        /// <summary>
        /// Returns a range of pages to the free lists, setting the free bits on the
        /// largest order that fits and coalescing buddies upwards where possible.
        /// </summary>
        /// <param name="address">Base address of the range being freed</param>
        /// <param name="pagesCount">Number of pages to free</param>
        private void FreePages(ulong address, ulong pagesCount)
        {
            lock (_blocks)
            {
                ulong endAddr = address + pagesCount * KPageTableBase.PageSize;

                int blockIndex = _blockOrdersCount - 1;

                ulong addressRounded = 0;
                ulong endAddrTruncated = 0;

                // Find the largest order with at least one whole block inside the range.
                for (; blockIndex >= 0; blockIndex--)
                {
                    KMemoryRegionBlock allocInfo = _blocks[blockIndex];

                    int blockSize = 1 << allocInfo.Order;

                    addressRounded = BitUtils.AlignUp(address, blockSize);
                    endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize);

                    if (addressRounded < endAddrTruncated)
                    {
                        break;
                    }
                }

                // Marks one block free on order blockIndex (captured from the outer
                // scope), then tries to coalesce it with its buddies into larger
                // orders, repeating until coalescing fails or orders run out.
                void FreeRegion(ulong currAddress)
                {
                    for (int currBlockIndex = blockIndex;
                             currBlockIndex < _blockOrdersCount && currAddress != 0;
                             currBlockIndex++)
                    {
                        KMemoryRegionBlock block = _blocks[currBlockIndex];

                        block.FreeCount++;

                        ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order;

                        int index = (int)freedBlocks;

                        // Set the free bit on every level, stopping once a level
                        // already had other bits set (parents already marked).
                        for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64)
                        {
                            long mask = block.Masks[level][index / 64];

                            block.Masks[level][index / 64] = mask | (1L << (index & 63));

                            if (mask != 0)
                            {
                                break;
                            }
                        }

                        // If the whole group of buddies is now free, merge them into
                        // one block of the next order and continue upwards.
                        int blockSizeDelta = 1 << (block.NextOrder - block.Order);

                        int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta);

                        if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta))
                        {
                            break;
                        }

                        currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order);
                    }
                }

                // Free inside aligned region.
                ulong baseAddress = addressRounded;

                while (baseAddress < endAddrTruncated)
                {
                    ulong blockSize = 1UL << _blocks[blockIndex].Order;

                    FreeRegion(baseAddress);

                    baseAddress += blockSize;
                }

                int nextBlockIndex = blockIndex - 1;

                // Free region between Address and aligned region start.
                // Uses progressively smaller orders to cover the unaligned head.
                baseAddress = addressRounded;

                for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
                {
                    ulong blockSize = 1UL << _blocks[blockIndex].Order;

                    while (baseAddress - blockSize >= address)
                    {
                        baseAddress -= blockSize;

                        FreeRegion(baseAddress);
                    }
                }

                // Free region between aligned region end and End Address.
                // Same approach for the unaligned tail.
                baseAddress = endAddrTruncated;

                for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
                {
                    ulong blockSize = 1UL << _blocks[blockIndex].Order;

                    while (baseAddress + blockSize <= endAddr)
                    {
                        FreeRegion(baseAddress);

                        baseAddress += blockSize;
                    }
                }
            }
        }
  349. public ulong GetFreePages()
  350. {
  351. lock (_blocks)
  352. {
  353. return GetFreePagesImpl();
  354. }
  355. }
  356. private ulong GetFreePagesImpl()
  357. {
  358. ulong availablePages = 0;
  359. for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
  360. {
  361. KMemoryRegionBlock block = _blocks[blockIndex];
  362. ulong blockPagesCount = (1UL << block.Order) / KPageTableBase.PageSize;
  363. availablePages += blockPagesCount * block.FreeCount;
  364. }
  365. return availablePages;
  366. }
  367. public void IncrementPagesReferenceCount(ulong address, ulong pagesCount)
  368. {
  369. ulong index = GetPageOffset(address);
  370. ulong endIndex = index + pagesCount;
  371. while (index < endIndex)
  372. {
  373. ushort referenceCount = ++_pageReferenceCounts[index];
  374. Debug.Assert(referenceCount >= 1);
  375. index++;
  376. }
  377. }
  378. public void DecrementPagesReferenceCount(ulong address, ulong pagesCount)
  379. {
  380. ulong index = GetPageOffset(address);
  381. ulong endIndex = index + pagesCount;
  382. ulong freeBaseIndex = 0;
  383. ulong freePagesCount = 0;
  384. while (index < endIndex)
  385. {
  386. Debug.Assert(_pageReferenceCounts[index] > 0);
  387. ushort referenceCount = --_pageReferenceCounts[index];
  388. if (referenceCount == 0)
  389. {
  390. if (freePagesCount != 0)
  391. {
  392. freePagesCount++;
  393. }
  394. else
  395. {
  396. freeBaseIndex = index;
  397. freePagesCount = 1;
  398. }
  399. }
  400. else if (freePagesCount != 0)
  401. {
  402. FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
  403. freePagesCount = 0;
  404. }
  405. index++;
  406. }
  407. if (freePagesCount != 0)
  408. {
  409. FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount);
  410. }
  411. }
  412. public ulong GetPageOffset(ulong address)
  413. {
  414. return (address - Address) / KPageTableBase.PageSize;
  415. }
  416. public ulong GetPageOffsetFromEnd(ulong address)
  417. {
  418. return (EndAddr - address) / KPageTableBase.PageSize;
  419. }
  420. }
  421. }