Buffer.cs 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037
  1. using Ryujinx.Graphics.GAL;
  2. using Ryujinx.Graphics.Gpu.Synchronization;
  3. using Ryujinx.Memory.Range;
  4. using Ryujinx.Memory.Tracking;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Linq;
  8. using System.Runtime.CompilerServices;
  9. using System.Threading;
namespace Ryujinx.Graphics.Gpu.Memory
{
    /// <summary>
    /// Action that writes a range of buffer data back to guest memory, after waiting for the given sync number.
    /// </summary>
    /// <param name="address">Start address of the range to flush</param>
    /// <param name="size">Size in bytes of the range</param>
    /// <param name="syncNumber">Sync number to be waited on before the data is flushed</param>
    delegate void BufferFlushAction(ulong address, ulong size, ulong syncNumber);

    /// <summary>
    /// Buffer, used to store vertex and index data, uniform and storage buffers, and others.
    /// </summary>
    class Buffer : IRange, ISyncActionHandler, IDisposable
    {
        // Buffers strictly larger than this use granular (multi-region) write tracking;
        // smaller ones use a single tracking handle.
        private const ulong GranularBufferThreshold = 4096;

        private readonly GpuContext _context;
        private readonly PhysicalMemory _physicalMemory;

        /// <summary>
        /// Host buffer handle.
        /// </summary>
        public BufferHandle Handle { get; private set; }

        /// <summary>
        /// Start address of the buffer in guest memory.
        /// </summary>
        public ulong Address { get; }

        /// <summary>
        /// Size of the buffer in bytes.
        /// </summary>
        public ulong Size { get; }

        /// <summary>
        /// End address of the buffer in guest memory.
        /// </summary>
        public ulong EndAddress => Address + Size;

        /// <summary>
        /// Increments when the buffer is (partially) unmapped or disposed.
        /// </summary>
        public int UnmappedSequence { get; private set; }

        /// <summary>
        /// Indicates if the buffer can be used in a sparse buffer mapping.
        /// </summary>
        public bool SparseCompatible { get; }

        /// <summary>
        /// Ranges of the buffer that have been modified on the GPU.
        /// Ranges defined here cannot be updated from CPU until a CPU waiting sync point is reached.
        /// Then, write tracking will signal, wait for GPU sync (generated at the syncpoint) and flush these regions.
        /// </summary>
        /// <remarks>
        /// This is null until at least one modification occurs.
        /// </remarks>
        private BufferModifiedRangeList _modifiedRanges = null;

        /// <summary>
        /// A structure that is used to flush buffer data back to a host mapped buffer for cached readback.
        /// Only used if the buffer data is explicitly owned by device local memory.
        /// </summary>
        private BufferPreFlush _preFlush = null;

        /// <summary>
        /// Usage tracking state that determines what type of backing the buffer should use.
        /// </summary>
        public BufferBackingState BackingState;

        // Write tracking handles; exactly one of these is created, selected by _useGranular.
        private readonly MultiRegionHandle _memoryTrackingGranular;
        private readonly RegionHandle _memoryTracking;

        // Delegates cached once in the constructor to avoid per-call allocations on hot paths.
        private readonly RegionSignal _externalFlushDelegate;
        private readonly Action<ulong, ulong> _loadDelegate;
        private readonly Action<ulong, ulong> _modifiedDelegate;

        // Virtual (multi-range) buffers that mirror data from this buffer.
        // Guarded by _virtualDependenciesLock; null when there are no dependencies.
        private HashSet<MultiRangeBuffer> _virtualDependencies;
        private readonly ReaderWriterLockSlim _virtualDependenciesLock;

        // Context sequence number captured on the last non-granular synchronization,
        // used to limit uploads to one per sequence number.
        private int _sequenceNumber;

        // True when the buffer uses granular tracking (see GranularBufferThreshold).
        private readonly bool _useGranular;

        // True while a sync action is registered with the context and has not fired yet.
        private bool _syncActionRegistered;

        // Reference count; a value of 0 indicates the buffer has been released (checked in SyncPreAction).
        private int _referenceCount = 1;

        // Single pending force-dirty range, loaded lazily by the next overlapping SynchronizeMemory call.
        // _dirtyStart == ulong.MaxValue means there is no pending range.
        private ulong _dirtyStart = ulong.MaxValue;
        private ulong _dirtyEnd = ulong.MaxValue;
        /// <summary>
        /// Creates a new instance of the buffer.
        /// </summary>
        /// <param name="context">GPU context that the buffer belongs to</param>
        /// <param name="physicalMemory">Physical memory where the buffer is mapped</param>
        /// <param name="address">Start address of the buffer</param>
        /// <param name="size">Size of the buffer in bytes</param>
        /// <param name="stage">The type of usage that created the buffer</param>
        /// <param name="sparseCompatible">Indicates if the buffer can be used in a sparse buffer mapping</param>
        /// <param name="baseBuffers">Buffers which this buffer contains, and will inherit tracking handles from</param>
        public Buffer(
            GpuContext context,
            PhysicalMemory physicalMemory,
            ulong address,
            ulong size,
            BufferStage stage,
            bool sparseCompatible,
            IEnumerable<Buffer> baseBuffers = null)
        {
            _context = context;
            _physicalMemory = physicalMemory;
            Address = address;
            Size = size;
            SparseCompatible = sparseCompatible;

            BackingState = new BufferBackingState(_context, this, stage, baseBuffers);

            // Pick the initial host backing access type based on the usage that created the buffer.
            BufferAccess access = BackingState.SwitchAccess(this);

            Handle = context.Renderer.CreateBuffer((int)size, access);

            _useGranular = size > GranularBufferThreshold;

            // Gather the tracking handles of the buffers this one replaces, so their dirty state is inherited.
            IEnumerable<IRegionHandle> baseHandles = null;

            if (baseBuffers != null)
            {
                baseHandles = baseBuffers.SelectMany(buffer =>
                {
                    if (buffer._useGranular)
                    {
                        return buffer._memoryTrackingGranular.GetHandles();
                    }
                    else
                    {
                        // Wrap the single handle so both cases produce a sequence.
                        return Enumerable.Repeat(buffer._memoryTracking, 1);
                    }
                });
            }

            if (_useGranular)
            {
                // Granular tracking consumes the inherited handles directly.
                _memoryTrackingGranular = physicalMemory.BeginGranularTracking(address, size, ResourceKind.Buffer, RegionFlags.UnalignedAccess, baseHandles);

                _memoryTrackingGranular.RegisterPreciseAction(address, size, PreciseAction);
            }
            else
            {
                _memoryTracking = physicalMemory.BeginTracking(address, size, ResourceKind.Buffer, RegionFlags.UnalignedAccess);

                if (baseHandles != null)
                {
                    // Start clean, then mark the whole buffer dirty if any inherited handle was dirty,
                    // since a single handle cannot represent partial dirtiness.
                    _memoryTracking.Reprotect(false);

                    foreach (IRegionHandle handle in baseHandles)
                    {
                        if (handle.Dirty)
                        {
                            _memoryTracking.Reprotect(true);
                        }

                        // The inherited handles are owned by this buffer now, so dispose them.
                        handle.Dispose();
                    }
                }

                _memoryTracking.RegisterPreciseAction(PreciseAction);
            }

            // Cache delegates up front to avoid allocating them on every call.
            _externalFlushDelegate = new RegionSignal(ExternalFlush);
            _loadDelegate = new Action<ulong, ulong>(LoadRegion);
            _modifiedDelegate = new Action<ulong, ulong>(RegionModified);

            _virtualDependenciesLock = new ReaderWriterLockSlim();
        }
        /// <summary>
        /// Recreates the backing buffer based on the desired access type
        /// reported by the backing state struct.
        /// </summary>
        private void ChangeBacking()
        {
            BufferAccess access = BackingState.SwitchAccess(this);

            // Create the new backing and copy the current contents over on the GPU.
            BufferHandle newHandle = _context.Renderer.CreateBuffer((int)Size, access);

            _context.Renderer.Pipeline.CopyBuffer(Handle, newHandle, 0, 0, (int)Size);

            // Let the modified range list know its backing has moved.
            _modifiedRanges?.SelfMigration();

            // If switching from device local to host mapped, pre-flushing data no longer makes sense.
            // This is set to null and disposed when the migration fully completes.
            _preFlush = null;

            Handle = newHandle;

            _physicalMemory.BufferCache.BufferBackingChanged(this);
        }
  162. /// <summary>
  163. /// Gets a sub-range from the buffer, from a start address til a page boundary after the given size.
  164. /// </summary>
  165. /// <remarks>
  166. /// This can be used to bind and use sub-ranges of the buffer on the host API.
  167. /// </remarks>
  168. /// <param name="address">Start address of the sub-range, must be greater than or equal to the buffer address</param>
  169. /// <param name="size">Size in bytes of the sub-range, must be less than or equal to the buffer size</param>
  170. /// <param name="write">Whether the buffer will be written to by this use</param>
  171. /// <returns>The buffer sub-range</returns>
  172. public BufferRange GetRangeAligned(ulong address, ulong size, bool write)
  173. {
  174. ulong end = ((address + size + MemoryManager.PageMask) & ~MemoryManager.PageMask) - Address;
  175. ulong offset = address - Address;
  176. return new BufferRange(Handle, (int)offset, (int)(end - offset), write);
  177. }
  178. /// <summary>
  179. /// Gets a sub-range from the buffer.
  180. /// </summary>
  181. /// <remarks>
  182. /// This can be used to bind and use sub-ranges of the buffer on the host API.
  183. /// </remarks>
  184. /// <param name="address">Start address of the sub-range, must be greater than or equal to the buffer address</param>
  185. /// <param name="size">Size in bytes of the sub-range, must be less than or equal to the buffer size</param>
  186. /// <param name="write">Whether the buffer will be written to by this use</param>
  187. /// <returns>The buffer sub-range</returns>
  188. public BufferRange GetRange(ulong address, ulong size, bool write)
  189. {
  190. int offset = (int)(address - Address);
  191. return new BufferRange(Handle, offset, (int)size, write);
  192. }
  193. /// <summary>
  194. /// Checks if a given range overlaps with the buffer.
  195. /// </summary>
  196. /// <param name="address">Start address of the range</param>
  197. /// <param name="size">Size in bytes of the range</param>
  198. /// <returns>True if the range overlaps, false otherwise</returns>
  199. public bool OverlapsWith(ulong address, ulong size)
  200. {
  201. return Address < address + size && address < EndAddress;
  202. }
  203. /// <summary>
  204. /// Checks if a given range is fully contained in the buffer.
  205. /// </summary>
  206. /// <param name="address">Start address of the range</param>
  207. /// <param name="size">Size in bytes of the range</param>
  208. /// <returns>True if the range is contained, false otherwise</returns>
  209. public bool FullyContains(ulong address, ulong size)
  210. {
  211. return address >= Address && address + size <= EndAddress;
  212. }
        /// <summary>
        /// Performs guest to host memory synchronization of the buffer data.
        /// </summary>
        /// <remarks>
        /// This causes the buffer data to be overwritten if a write was detected from the CPU,
        /// since the last call to this method.
        /// </remarks>
        /// <param name="address">Start address of the range to synchronize</param>
        /// <param name="size">Size in bytes of the range to synchronize</param>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void SynchronizeMemory(ulong address, ulong size)
        {
            if (_useGranular)
            {
                // Upload only the sub-regions the granular tracking reports as modified.
                _memoryTrackingGranular.QueryModified(address, size, _modifiedDelegate, _context.SequenceNumber);
            }
            else
            {
                // Non-granular path: upload at most once per context sequence number.
                if (_context.SequenceNumber != _sequenceNumber && _memoryTracking.DirtyOrVolatile())
                {
                    _memoryTracking.Reprotect();

                    if (_modifiedRanges != null)
                    {
                        // Skip ranges that were modified on the GPU; load only the remainder from guest memory.
                        _modifiedRanges.ExcludeModifiedRegions(Address, Size, _loadDelegate);
                    }
                    else
                    {
                        BackingState.RecordSet();
                        _context.Renderer.SetBufferData(Handle, 0, _physicalMemory.GetSpan(Address, (int)Size));
                        CopyToDependantVirtualBuffers();
                    }

                    _sequenceNumber = _context.SequenceNumber;

                    // The whole buffer was just refreshed, so any pending force-dirty range is stale.
                    _dirtyStart = ulong.MaxValue;
                }
            }

            // Service a pending force-dirty range (see ForceDirty) if it overlaps the requested region.
            if (_dirtyStart != ulong.MaxValue)
            {
                ulong end = address + size;

                if (end > _dirtyStart && address < _dirtyEnd)
                {
                    if (_modifiedRanges != null)
                    {
                        _modifiedRanges.ExcludeModifiedRegions(_dirtyStart, _dirtyEnd - _dirtyStart, _loadDelegate);
                    }
                    else
                    {
                        LoadRegion(_dirtyStart, _dirtyEnd - _dirtyStart);
                    }

                    _dirtyStart = ulong.MaxValue;
                }
            }
        }
  265. /// <summary>
  266. /// Ensure that the modified range list exists.
  267. /// </summary>
  268. private void EnsureRangeList()
  269. {
  270. _modifiedRanges ??= new BufferModifiedRangeList(_context, this, Flush);
  271. }
  272. /// <summary>
  273. /// Checks if a backing change is deemed necessary from the given usage.
  274. /// If it is, queues a backing change to happen on the next sync action.
  275. /// </summary>
  276. /// <param name="stage">Buffer stage that can change backing type</param>
  277. private void TryQueueBackingChange(BufferStage stage)
  278. {
  279. if (BackingState.ShouldChangeBacking(stage))
  280. {
  281. if (!_syncActionRegistered)
  282. {
  283. _context.RegisterSyncAction(this);
  284. _syncActionRegistered = true;
  285. }
  286. }
  287. }
  288. /// <summary>
  289. /// Signal that the given region of the buffer has been modified.
  290. /// </summary>
  291. /// <param name="address">The start address of the modified region</param>
  292. /// <param name="size">The size of the modified region</param>
  293. /// <param name="stage">Buffer stage that triggered the modification</param>
  294. public void SignalModified(ulong address, ulong size, BufferStage stage)
  295. {
  296. EnsureRangeList();
  297. TryQueueBackingChange(stage);
  298. _modifiedRanges.SignalModified(address, size);
  299. if (!_syncActionRegistered)
  300. {
  301. _context.RegisterSyncAction(this);
  302. _syncActionRegistered = true;
  303. }
  304. }
  305. /// <summary>
  306. /// Indicate that mofifications in a given region of this buffer have been overwritten.
  307. /// </summary>
  308. /// <param name="address">The start address of the region</param>
  309. /// <param name="size">The size of the region</param>
  310. public void ClearModified(ulong address, ulong size)
  311. {
  312. _modifiedRanges?.Clear(address, size);
  313. }
  314. /// <summary>
  315. /// Action to be performed immediately before sync is created.
  316. /// This will copy any buffer ranges designated for pre-flushing.
  317. /// </summary>
  318. /// <param name="syncpoint">True if the action is a guest syncpoint</param>
  319. public void SyncPreAction(bool syncpoint)
  320. {
  321. if (_referenceCount == 0)
  322. {
  323. return;
  324. }
  325. if (BackingState.ShouldChangeBacking())
  326. {
  327. ChangeBacking();
  328. }
  329. if (BackingState.IsDeviceLocal)
  330. {
  331. _preFlush ??= new BufferPreFlush(_context, this, FlushImpl);
  332. if (_preFlush.ShouldCopy)
  333. {
  334. _modifiedRanges?.GetRangesAtSync(Address, Size, _context.SyncNumber, (address, size) =>
  335. {
  336. _preFlush.CopyModified(address, size);
  337. });
  338. }
  339. }
  340. }
  341. /// <summary>
  342. /// Action to be performed when a syncpoint is reached after modification.
  343. /// This will register read/write tracking to flush the buffer from GPU when its memory is used.
  344. /// </summary>
  345. /// <inheritdoc/>
  346. public bool SyncAction(bool syncpoint)
  347. {
  348. _syncActionRegistered = false;
  349. if (_useGranular)
  350. {
  351. _modifiedRanges?.GetRanges(Address, Size, (address, size) =>
  352. {
  353. _memoryTrackingGranular.RegisterAction(address, size, _externalFlushDelegate);
  354. SynchronizeMemory(address, size);
  355. });
  356. }
  357. else
  358. {
  359. _memoryTracking.RegisterAction(_externalFlushDelegate);
  360. SynchronizeMemory(Address, Size);
  361. }
  362. return true;
  363. }
        /// <summary>
        /// Inherit modified and dirty ranges from another buffer.
        /// </summary>
        /// <param name="from">The buffer to inherit from</param>
        public void InheritModifiedRanges(Buffer from)
        {
            if (from._modifiedRanges != null && from._modifiedRanges.HasRanges)
            {
                // If the source buffer had a pending sync action, this buffer needs one too
                // so the inherited ranges are eventually flushed.
                if (from._syncActionRegistered && !_syncActionRegistered)
                {
                    _context.RegisterSyncAction(this);
                    _syncActionRegistered = true;
                }

                // Registers external flush tracking for an inherited range, matching this
                // buffer's tracking granularity.
                void registerRangeAction(ulong address, ulong size)
                {
                    if (_useGranular)
                    {
                        _memoryTrackingGranular.RegisterAction(address, size, _externalFlushDelegate);
                    }
                    else
                    {
                        _memoryTracking.RegisterAction(_externalFlushDelegate);
                    }
                }

                EnsureRangeList();

                _modifiedRanges.InheritRanges(from._modifiedRanges, registerRangeAction);
            }

            // Also carry over any pending force-dirty range from the source buffer.
            if (from._dirtyStart != ulong.MaxValue)
            {
                ForceDirty(from._dirtyStart, from._dirtyEnd - from._dirtyStart);
            }
        }
  396. /// <summary>
  397. /// Determine if a given region of the buffer has been modified, and must be flushed.
  398. /// </summary>
  399. /// <param name="address">The start address of the region</param>
  400. /// <param name="size">The size of the region</param>
  401. /// <returns></returns>
  402. public bool IsModified(ulong address, ulong size)
  403. {
  404. if (_modifiedRanges != null)
  405. {
  406. return _modifiedRanges.HasRange(address, size);
  407. }
  408. return false;
  409. }
        /// <summary>
        /// Clear the dirty range that overlaps with the given region.
        /// </summary>
        /// <param name="address">Start address of the modified region</param>
        /// <param name="size">Size of the modified region</param>
        private void ClearDirty(ulong address, ulong size)
        {
            if (_dirtyStart != ulong.MaxValue)
            {
                ulong end = address + size;

                // Only act when the given region overlaps the pending dirty range.
                if (end > _dirtyStart && address < _dirtyEnd)
                {
                    if (address <= _dirtyStart)
                    {
                        // Cut off the start.

                        if (end < _dirtyEnd)
                        {
                            _dirtyStart = end;
                        }
                        else
                        {
                            // The region covers the whole dirty range; clear it entirely.
                            _dirtyStart = ulong.MaxValue;
                        }
                    }
                    else if (end >= _dirtyEnd)
                    {
                        // Cut off the end.
                        _dirtyEnd = address;
                    }

                    // If fully contained, do nothing.
                    // (Trimming the middle would need two ranges, but only one is kept.)
                }
            }
        }
  443. /// <summary>
  444. /// Indicate that a region of the buffer was modified, and must be loaded from memory.
  445. /// </summary>
  446. /// <param name="mAddress">Start address of the modified region</param>
  447. /// <param name="mSize">Size of the modified region</param>
  448. private void RegionModified(ulong mAddress, ulong mSize)
  449. {
  450. if (mAddress < Address)
  451. {
  452. mAddress = Address;
  453. }
  454. ulong maxSize = Address + Size - mAddress;
  455. if (mSize > maxSize)
  456. {
  457. mSize = maxSize;
  458. }
  459. ClearDirty(mAddress, mSize);
  460. if (_modifiedRanges != null)
  461. {
  462. _modifiedRanges.ExcludeModifiedRegions(mAddress, mSize, _loadDelegate);
  463. }
  464. else
  465. {
  466. LoadRegion(mAddress, mSize);
  467. }
  468. }
  469. /// <summary>
  470. /// Load a region of the buffer from memory.
  471. /// </summary>
  472. /// <param name="mAddress">Start address of the modified region</param>
  473. /// <param name="mSize">Size of the modified region</param>
  474. private void LoadRegion(ulong mAddress, ulong mSize)
  475. {
  476. BackingState.RecordSet();
  477. int offset = (int)(mAddress - Address);
  478. _context.Renderer.SetBufferData(Handle, offset, _physicalMemory.GetSpan(mAddress, (int)mSize));
  479. CopyToDependantVirtualBuffers(mAddress, mSize);
  480. }
  481. /// <summary>
  482. /// Force a region of the buffer to be dirty within the memory tracking. Avoids reprotection and nullifies sequence number check.
  483. /// </summary>
  484. /// <param name="mAddress">Start address of the modified region</param>
  485. /// <param name="mSize">Size of the region to force dirty</param>
  486. private void ForceTrackingDirty(ulong mAddress, ulong mSize)
  487. {
  488. if (_useGranular)
  489. {
  490. _memoryTrackingGranular.ForceDirty(mAddress, mSize);
  491. }
  492. else
  493. {
  494. _memoryTracking.ForceDirty();
  495. _sequenceNumber--;
  496. }
  497. }
        /// <summary>
        /// Force a region of the buffer to be dirty. Avoids reprotection and nullifies sequence number check.
        /// </summary>
        /// <param name="mAddress">Start address of the modified region</param>
        /// <param name="mSize">Size of the region to force dirty</param>
        public void ForceDirty(ulong mAddress, ulong mSize)
        {
            // These CPU writes supersede any GPU modified ranges in the region.
            _modifiedRanges?.Clear(mAddress, mSize);

            ulong end = mAddress + mSize;

            if (_dirtyStart == ulong.MaxValue)
            {
                // No pending dirty range; start one.
                _dirtyStart = mAddress;
                _dirtyEnd = end;
            }
            else
            {
                // Is the new range more than a page away from the existing one?
                // The signed casts make the comparison false when the subtraction
                // wraps (i.e. the ranges overlap or the new one is on the other side).
                if ((long)(mAddress - _dirtyEnd) >= (long)MemoryManager.PageSize ||
                    (long)(_dirtyStart - end) >= (long)MemoryManager.PageSize)
                {
                    // Too far apart to merge into one range; fall back to tracking-level dirtiness.
                    ForceTrackingDirty(mAddress, mSize);
                }
                else
                {
                    // Close enough: widen the single pending dirty range to cover both.
                    _dirtyStart = Math.Min(_dirtyStart, mAddress);
                    _dirtyEnd = Math.Max(_dirtyEnd, end);
                }
            }
        }
  527. /// <summary>
  528. /// Performs copy of all the buffer data from one buffer to another.
  529. /// </summary>
  530. /// <param name="destination">The destination buffer to copy the data into</param>
  531. /// <param name="dstOffset">The offset of the destination buffer to copy into</param>
  532. public void CopyTo(Buffer destination, int dstOffset)
  533. {
  534. CopyFromDependantVirtualBuffers();
  535. _context.Renderer.Pipeline.CopyBuffer(Handle, destination.Handle, 0, dstOffset, (int)Size);
  536. }
  537. /// <summary>
  538. /// Flushes a range of the buffer.
  539. /// This writes the range data back into guest memory.
  540. /// </summary>
  541. /// <param name="handle">Buffer handle to flush data from</param>
  542. /// <param name="address">Start address of the range</param>
  543. /// <param name="size">Size in bytes of the range</param>
  544. private void FlushImpl(BufferHandle handle, ulong address, ulong size)
  545. {
  546. int offset = (int)(address - Address);
  547. using PinnedSpan<byte> data = _context.Renderer.GetBufferData(handle, offset, (int)size);
  548. // TODO: When write tracking shaders, they will need to be aware of changes in overlapping buffers.
  549. _physicalMemory.WriteUntracked(address, CopyFromDependantVirtualBuffers(data.Get(), address, size));
  550. }
  551. /// <summary>
  552. /// Flushes a range of the buffer.
  553. /// This writes the range data back into guest memory.
  554. /// </summary>
  555. /// <param name="address">Start address of the range</param>
  556. /// <param name="size">Size in bytes of the range</param>
  557. private void FlushImpl(ulong address, ulong size)
  558. {
  559. FlushImpl(Handle, address, size);
  560. }
  561. /// <summary>
  562. /// Flushes a range of the buffer from the most optimal source.
  563. /// This writes the range data back into guest memory.
  564. /// </summary>
  565. /// <param name="address">Start address of the range</param>
  566. /// <param name="size">Size in bytes of the range</param>
  567. /// <param name="syncNumber">Sync number waited for before flushing the data</param>
  568. public void Flush(ulong address, ulong size, ulong syncNumber)
  569. {
  570. BackingState.RecordFlush();
  571. BufferPreFlush preFlush = _preFlush;
  572. if (preFlush != null)
  573. {
  574. preFlush.FlushWithAction(address, size, syncNumber);
  575. }
  576. else
  577. {
  578. FlushImpl(address, size);
  579. }
  580. }
  581. /// <summary>
  582. /// Gets an action that disposes the backing buffer using its current handle.
  583. /// Useful for deleting an old copy of the buffer after the handle changes.
  584. /// </summary>
  585. /// <returns>An action that flushes data from the specified range, using the buffer handle at the time the method is generated</returns>
  586. public Action GetSnapshotDisposeAction()
  587. {
  588. BufferHandle handle = Handle;
  589. BufferPreFlush preFlush = _preFlush;
  590. return () =>
  591. {
  592. _context.Renderer.DeleteBuffer(handle);
  593. preFlush?.Dispose();
  594. };
  595. }
  596. /// <summary>
  597. /// Gets an action that flushes a range of the buffer using its current handle.
  598. /// Useful for flushing data from old copies of the buffer after the handle changes.
  599. /// </summary>
  600. /// <returns>An action that flushes data from the specified range, using the buffer handle at the time the method is generated</returns>
  601. public BufferFlushAction GetSnapshotFlushAction()
  602. {
  603. BufferHandle handle = Handle;
  604. return (ulong address, ulong size, ulong _) =>
  605. {
  606. FlushImpl(handle, address, size);
  607. };
  608. }
  609. /// <summary>
  610. /// Align a given address and size region to page boundaries.
  611. /// </summary>
  612. /// <param name="address">The start address of the region</param>
  613. /// <param name="size">The size of the region</param>
  614. /// <returns>The page aligned address and size</returns>
  615. private static (ulong address, ulong size) PageAlign(ulong address, ulong size)
  616. {
  617. ulong pageMask = MemoryManager.PageMask;
  618. ulong rA = address & ~pageMask;
  619. ulong rS = ((address + size + pageMask) & ~pageMask) - rA;
  620. return (rA, rS);
  621. }
  622. /// <summary>
  623. /// Flush modified ranges of the buffer from another thread.
  624. /// This will flush all modifications made before the active SyncNumber was set, and may block to wait for GPU sync.
  625. /// </summary>
  626. /// <param name="address">Address of the memory action</param>
  627. /// <param name="size">Size in bytes</param>
  628. public void ExternalFlush(ulong address, ulong size)
  629. {
  630. _context.Renderer.BackgroundContextAction(() =>
  631. {
  632. var ranges = _modifiedRanges;
  633. if (ranges != null)
  634. {
  635. (address, size) = PageAlign(address, size);
  636. ranges.WaitForAndFlushRanges(address, size);
  637. }
  638. }, true);
  639. }
  640. /// <summary>
  641. /// An action to be performed when a precise memory access occurs to this resource.
  642. /// For buffers, this skips flush-on-write by punching holes directly into the modified range list.
  643. /// </summary>
  644. /// <param name="address">Address of the memory action</param>
  645. /// <param name="size">Size in bytes</param>
  646. /// <param name="write">True if the access was a write, false otherwise</param>
  647. private bool PreciseAction(ulong address, ulong size, bool write)
  648. {
  649. if (!write)
  650. {
  651. // We only want to skip flush-on-write.
  652. return false;
  653. }
  654. ulong maxAddress = Math.Max(address, Address);
  655. ulong minEndAddress = Math.Min(address + size, Address + Size);
  656. if (maxAddress >= minEndAddress)
  657. {
  658. // Access doesn't overlap.
  659. return false;
  660. }
  661. ForceDirty(maxAddress, minEndAddress - maxAddress);
  662. return true;
  663. }
  664. /// <summary>
  665. /// Called when part of the memory for this buffer has been unmapped.
  666. /// Calls are from non-GPU threads.
  667. /// </summary>
  668. /// <param name="address">Start address of the unmapped region</param>
  669. /// <param name="size">Size of the unmapped region</param>
  670. public void Unmapped(ulong address, ulong size)
  671. {
  672. BufferModifiedRangeList modifiedRanges = _modifiedRanges;
  673. modifiedRanges?.Clear(address, size);
  674. UnmappedSequence++;
  675. }
  676. /// <summary>
  677. /// Adds a virtual buffer dependency, indicating that a virtual buffer depends on data from this buffer.
  678. /// </summary>
  679. /// <param name="virtualBuffer">Dependant virtual buffer</param>
  680. public void AddVirtualDependency(MultiRangeBuffer virtualBuffer)
  681. {
  682. _virtualDependenciesLock.EnterWriteLock();
  683. try
  684. {
  685. (_virtualDependencies ??= new()).Add(virtualBuffer);
  686. }
  687. finally
  688. {
  689. _virtualDependenciesLock.ExitWriteLock();
  690. }
  691. }
  692. /// <summary>
  693. /// Removes a virtual buffer dependency, indicating that a virtual buffer no longer depends on data from this buffer.
  694. /// </summary>
  695. /// <param name="virtualBuffer">Dependant virtual buffer</param>
  696. public void RemoveVirtualDependency(MultiRangeBuffer virtualBuffer)
  697. {
  698. _virtualDependenciesLock.EnterWriteLock();
  699. try
  700. {
  701. if (_virtualDependencies != null)
  702. {
  703. _virtualDependencies.Remove(virtualBuffer);
  704. if (_virtualDependencies.Count == 0)
  705. {
  706. _virtualDependencies = null;
  707. }
  708. }
  709. }
  710. finally
  711. {
  712. _virtualDependenciesLock.ExitWriteLock();
  713. }
  714. }
  715. /// <summary>
  716. /// Copies the buffer data to all virtual buffers that depends on it.
  717. /// </summary>
  718. public void CopyToDependantVirtualBuffers()
  719. {
  720. CopyToDependantVirtualBuffers(Address, Size);
  721. }
  722. /// <summary>
  723. /// Copies the buffer data inside the specifide range to all virtual buffers that depends on it.
  724. /// </summary>
  725. /// <param name="address">Address of the range</param>
  726. /// <param name="size">Size of the range in bytes</param>
  727. public void CopyToDependantVirtualBuffers(ulong address, ulong size)
  728. {
  729. if (_virtualDependencies != null)
  730. {
  731. foreach (var virtualBuffer in _virtualDependencies)
  732. {
  733. CopyToDependantVirtualBuffer(virtualBuffer, address, size);
  734. }
  735. }
  736. }
  737. /// <summary>
  738. /// Copies all modified ranges from all virtual buffers back into this buffer.
  739. /// </summary>
  740. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  741. public void CopyFromDependantVirtualBuffers()
  742. {
  743. if (_virtualDependencies != null)
  744. {
  745. CopyFromDependantVirtualBuffersImpl();
  746. }
  747. }
/// <summary>
/// Copies all modified ranges from all virtual buffers back into this buffer.
/// Assumes the caller already checked that <c>_virtualDependencies</c> is not null.
/// </summary>
[MethodImpl(MethodImplOptions.NoInlining)]
private void CopyFromDependantVirtualBuffersImpl()
{
    // Process dependencies in modification order so that, where ranges overlap,
    // the most recently modified virtual buffer writes last and wins.
    foreach (var virtualBuffer in _virtualDependencies.OrderBy(x => x.ModificationSequenceNumber))
    {
        virtualBuffer.ConsumeModifiedRegion(this, (mAddress, mSize) =>
        {
            // Get offset inside both this and the virtual buffer.
            // Note that sometimes there is no right answer for the virtual offset,
            // as the same physical range might be mapped multiple times inside a virtual buffer.
            // We just assume it does not happen in practice as it can only be implemented correctly
            // when the host has support for proper sparse mapping.
            // Clamp the modified range to this buffer's [Address, EndAddress) extent.
            ulong mEndAddress = mAddress + mSize;
            mAddress = Math.Max(mAddress, Address);
            mSize = Math.Min(mEndAddress, EndAddress) - mAddress;
            int physicalOffset = (int)(mAddress - Address);
            int virtualOffset = virtualBuffer.Range.FindOffset(new(mAddress, mSize));
            // Copy only the clamped modified range from the virtual buffer into this one.
            _context.Renderer.Pipeline.CopyBuffer(virtualBuffer.Handle, Handle, virtualOffset, physicalOffset, (int)mSize);
        });
    }
}
  772. /// <summary>
  773. /// Copies all overlapping modified ranges from all virtual buffers back into this buffer, and returns an updated span with the data.
  774. /// </summary>
  775. /// <param name="dataSpan">Span where the unmodified data will be taken from for the output</param>
  776. /// <param name="address">Address of the region to copy</param>
  777. /// <param name="size">Size of the region to copy in bytes</param>
  778. /// <returns>A span with <paramref name="dataSpan"/>, and the data for all modified ranges if any</returns>
  779. private ReadOnlySpan<byte> CopyFromDependantVirtualBuffers(ReadOnlySpan<byte> dataSpan, ulong address, ulong size)
  780. {
  781. _virtualDependenciesLock.EnterReadLock();
  782. try
  783. {
  784. if (_virtualDependencies != null)
  785. {
  786. byte[] storage = dataSpan.ToArray();
  787. foreach (var virtualBuffer in _virtualDependencies.OrderBy(x => x.ModificationSequenceNumber))
  788. {
  789. virtualBuffer.ConsumeModifiedRegion(address, size, (mAddress, mSize) =>
  790. {
  791. // Get offset inside both this and the virtual buffer.
  792. // Note that sometimes there is no right answer for the virtual offset,
  793. // as the same physical range might be mapped multiple times inside a virtual buffer.
  794. // We just assume it does not happen in practice as it can only be implemented correctly
  795. // when the host has support for proper sparse mapping.
  796. ulong mEndAddress = mAddress + mSize;
  797. mAddress = Math.Max(mAddress, address);
  798. mSize = Math.Min(mEndAddress, address + size) - mAddress;
  799. int physicalOffset = (int)(mAddress - Address);
  800. int virtualOffset = virtualBuffer.Range.FindOffset(new(mAddress, mSize));
  801. _context.Renderer.Pipeline.CopyBuffer(virtualBuffer.Handle, Handle, virtualOffset, physicalOffset, (int)size);
  802. virtualBuffer.GetData(storage.AsSpan().Slice((int)(mAddress - address), (int)mSize), virtualOffset, (int)mSize);
  803. });
  804. }
  805. dataSpan = storage;
  806. }
  807. }
  808. finally
  809. {
  810. _virtualDependenciesLock.ExitReadLock();
  811. }
  812. return dataSpan;
  813. }
  814. /// <summary>
  815. /// Copies the buffer data to the specified virtual buffer.
  816. /// </summary>
  817. /// <param name="virtualBuffer">Virtual buffer to copy the data into</param>
  818. public void CopyToDependantVirtualBuffer(MultiRangeBuffer virtualBuffer)
  819. {
  820. CopyToDependantVirtualBuffer(virtualBuffer, Address, Size);
  821. }
  822. /// <summary>
  823. /// Copies the buffer data inside the given range to the specified virtual buffer.
  824. /// </summary>
  825. /// <param name="virtualBuffer">Virtual buffer to copy the data into</param>
  826. /// <param name="address">Address of the range</param>
  827. /// <param name="size">Size of the range in bytes</param>
  828. public void CopyToDependantVirtualBuffer(MultiRangeBuffer virtualBuffer, ulong address, ulong size)
  829. {
  830. // Broadcast data to all ranges of the virtual buffer that are contained inside this buffer.
  831. ulong lastOffset = 0;
  832. while (virtualBuffer.TryGetPhysicalOffset(this, lastOffset, out ulong srcOffset, out ulong dstOffset, out ulong copySize))
  833. {
  834. ulong innerOffset = address - Address;
  835. ulong innerEndOffset = (address + size) - Address;
  836. lastOffset = dstOffset + copySize;
  837. // Clamp range to the specified range.
  838. ulong copySrcOffset = Math.Max(srcOffset, innerOffset);
  839. ulong copySrcEndOffset = Math.Min(innerEndOffset, srcOffset + copySize);
  840. if (copySrcEndOffset > copySrcOffset)
  841. {
  842. copySize = copySrcEndOffset - copySrcOffset;
  843. dstOffset += copySrcOffset - srcOffset;
  844. srcOffset = copySrcOffset;
  845. _context.Renderer.Pipeline.CopyBuffer(Handle, virtualBuffer.Handle, (int)srcOffset, (int)dstOffset, (int)copySize);
  846. }
  847. }
  848. }
  849. /// <summary>
  850. /// Increments the buffer reference count.
  851. /// </summary>
  852. public void IncrementReferenceCount()
  853. {
  854. _referenceCount++;
  855. }
  856. /// <summary>
  857. /// Decrements the buffer reference count.
  858. /// </summary>
  859. public void DecrementReferenceCount()
  860. {
  861. if (--_referenceCount == 0)
  862. {
  863. DisposeData();
  864. }
  865. }
/// <summary>
/// Disposes the host buffer's data, not its tracking handles.
/// </summary>
public void DisposeData()
{
    // Modified-range bookkeeping is meaningless once the host buffer is deleted.
    _modifiedRanges?.Clear();
    _context.Renderer.DeleteBuffer(Handle);
    _preFlush?.Dispose();
    _preFlush = null;
    // NOTE(review): bumping UnmappedSequence presumably lets consumers caching
    // this buffer detect that its data was invalidated — confirm at the readers.
    UnmappedSequence++;
}
/// <summary>
/// Disposes the host buffer.
/// </summary>
public void Dispose()
{
    // Release memory tracking handles first, then drop this owner's reference;
    // the underlying host data is only deleted once the count reaches zero.
    _memoryTrackingGranular?.Dispose();
    _memoryTracking?.Dispose();
    DecrementReferenceCount();
}
  886. }
  887. }