KThread.cs 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Numerics;
  8. using System.Threading;
  9. namespace Ryujinx.HLE.HOS.Kernel.Threading
  10. {
  11. class KThread : KSynchronizationObject, IKFutureSchedulerObject
  12. {
        // Maximum number of objects a single WaitSynchronization call can wait on.
        public const int MaxWaitSyncObjects = 64;

        // Host-side event used by the scheduler to park/release this thread's host thread.
        private ManualResetEvent _schedulerWaitEvent;

        public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;

        public Thread HostThread { get; private set; }

        // Emulated CPU register state for this guest thread.
        public ARMeilleure.State.ExecutionContext Context { get; private set; }

        public KThreadContext ThreadContext { get; private set; }

        // Effective priority (lower value = higher priority; see UpdatePriorityInheritance).
        public int DynamicPriority { get; set; }
        // Bit N set = thread may run on core N.
        public long AffinityMask { get; set; }

        public long ThreadUid { get; private set; }

        // Accumulated run time; updated atomically via AddCpuTime.
        private long _totalTimeRunning;

        public long TotalTimeRunning => _totalTimeRunning;

        public KSynchronizationObject SignaledObj { get; set; }

        public ulong CondVarAddress { get; set; }

        // Guest entry point address, set by Initialize.
        private ulong _entrypoint;
        // Optional host callback run instead of guest code; non-null makes the thread non-schedulable.
        private ThreadStart _customThreadStart;
        private bool _forcedUnschedulable;

        public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;

        public ulong MutexAddress { get; set; }

        public KProcess Owner { get; private set; }

        // Thread local storage address in the guest address space (0 when not a user thread).
        private ulong _tlsAddress;

        public ulong TlsAddress => _tlsAddress;
        public ulong TlsDramAddress { get; private set; }

        // Fixed-size tables used by WaitSynchronization.
        public KSynchronizationObject[] WaitSyncObjects { get; }
        public int[] WaitSyncHandles { get; }

        public long LastScheduledTime { get; set; }

        // Per-core linked list nodes used by the scheduler's queues.
        public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

        // List this thread is withheld on while paused in a sync wait, if any.
        public LinkedList<KThread> Withholder { get; set; }
        public LinkedListNode<KThread> WithholderNode { get; set; }

        public LinkedListNode<KThread> ProcessListNode { get; set; }

        // Threads waiting on a mutex held by this thread, sorted by priority
        // (see AddToMutexWaitersList).
        private LinkedList<KThread> _mutexWaiters;
        private LinkedListNode<KThread> _mutexWaiterNode;

        public KThread MutexOwner { get; private set; }

        public int ThreadHandleForUserMutex { get; set; }

        // Pending pause sources (thread/process/kernel-init) combined into SchedFlags.
        private ThreadSchedState _forcePauseFlags;

        public KernelResult ObjSyncResult { get; set; }

        public int BasePriority { get; set; }
        public int PreferredCore { get; set; }

        public int CurrentCore { get; set; }
        public int ActiveCore { get; set; }

        // Values applied in place of PreferredCore/AffinityMask while an override is active.
        private long _affinityMaskOverride;
        private int _preferredCoreOverride;
#pragma warning disable CS0649
        // NOTE(review): never assigned in this file (hence the suppressed CS0649);
        // presumably written elsewhere or reserved — confirm before relying on it.
        private int _affinityOverrideCount;
#pragma warning restore CS0649

        public ThreadSchedState SchedFlags { get; private set; }

        // Int-backed flag so it can be used with Interlocked (see PrepareForTermination).
        private int _shallBeTerminated;

        public bool ShallBeTerminated
        {
            get => _shallBeTerminated != 0;
            set => _shallBeTerminated = value ? 1 : 0;
        }

        public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;

        public bool SyncCancelled { get; set; }
        public bool WaitingSync { get; set; }

        // Int-backed flag so it can be used with Interlocked (see ExitImpl/IsSignaled).
        private int _hasExited;
        private bool _hasBeenInitialized;
        private bool _hasBeenReleased;

        public bool WaitingInArbitration { get; set; }

        public long LastPc { get; set; }
  72. public KThread(KernelContext context) : base(context)
  73. {
  74. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  75. WaitSyncHandles = new int[MaxWaitSyncObjects];
  76. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  77. _mutexWaiters = new LinkedList<KThread>();
  78. }
        /// <summary>
        /// Initializes the thread: scheduling state, TLS, CPU context and host thread.
        /// </summary>
        /// <param name="entrypoint">Guest entry point address</param>
        /// <param name="argsPtr">Value placed into X0 for the guest entry point</param>
        /// <param name="stackTop">Initial stack pointer (X31 on 64-bit, R13 on 32-bit)</param>
        /// <param name="priority">Initial base and dynamic priority</param>
        /// <param name="cpuCore">Initial preferred, active and current core</param>
        /// <param name="owner">Owner process; may be null for kernel-internal threads</param>
        /// <param name="type">Thread type; must be in the 0-3 range</param>
        /// <param name="customThreadStart">Optional host routine run instead of guest code</param>
        /// <returns><see cref="KernelResult.Success"/>, or <see cref="KernelResult.OutOfMemory"/> if TLS allocation fails</returns>
        /// <exception cref="ArgumentException">Thrown when <paramref name="type"/> is out of range</exception>
        public KernelResult Initialize(
            ulong entrypoint,
            ulong argsPtr,
            ulong stackTop,
            int priority,
            int cpuCore,
            KProcess owner,
            ThreadType type,
            ThreadStart customThreadStart = null)
        {
            if ((uint)type > 3)
            {
                throw new ArgumentException($"Invalid thread type \"{type}\".");
            }

            ThreadContext = new KThreadContext();

            PreferredCore = cpuCore;
            AffinityMask |= 1L << cpuCore;

            // Dummy threads are treated as running from the start; others begin unscheduled.
            SchedFlags = type == ThreadType.Dummy
                ? ThreadSchedState.Running
                : ThreadSchedState.None;

            ActiveCore = cpuCore;
            ObjSyncResult = KernelResult.ThreadNotStarted;
            DynamicPriority = priority;
            BasePriority = priority;
            CurrentCore = cpuCore;
            _entrypoint = entrypoint;
            _customThreadStart = customThreadStart;

            if (type == ThreadType.User)
            {
                // NOTE(review): owner is dereferenced here without a null check, while the
                // block below guards on owner != null — presumably user threads always
                // have an owner process; confirm with callers.
                if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
                {
                    return KernelResult.OutOfMemory;
                }

                TlsDramAddress = owner.MemoryManager.GetDramAddressFromVa(_tlsAddress);

                MemoryHelper.FillWithZeros(owner.CpuMemory, (long)_tlsAddress, KTlsPageInfo.TlsEntrySize);
            }

            bool is64Bits;

            if (owner != null)
            {
                Owner = owner;

                owner.IncrementReferenceCount();
                owner.IncrementThreadCount();

                is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
            }
            else
            {
                // Ownerless (kernel) threads are treated as 64-bit.
                is64Bits = true;
            }

            // ThreadStart here is the private instance method below, not the delegate type.
            HostThread = new Thread(ThreadStart);

            Context = CpuContext.CreateExecutionContext();

            Context.IsAarch32 = !is64Bits;

            Context.SetX(0, argsPtr);

            if (is64Bits)
            {
                // 64-bit: register 31 is used as the stack pointer here.
                Context.SetX(31, stackTop);
            }
            else
            {
                // 32-bit: R13 is the stack pointer; the value is truncated to 32 bits.
                Context.SetX(13, (uint)stackTop);
            }

            // Counter-timer frequency register: 19.2 MHz.
            Context.CntfrqEl0 = 19200000;
            Context.Tpidr = (long)_tlsAddress;

            ThreadUid = KernelContext.NewThreadUid();

            HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";

            _hasBeenInitialized = true;

            if (owner != null)
            {
                owner.SubscribeThreadEventHandlers(Context);
                owner.AddThread(this);

                // If the owner process is paused, start this thread paused as well.
                if (owner.IsPaused)
                {
                    KernelContext.CriticalSection.Enter();

                    if (TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();

                        return KernelResult.Success;
                    }

                    _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;

                    CombineForcePauseFlags();

                    KernelContext.CriticalSection.Leave();
                }
            }

            return KernelResult.Success;
        }
        /// <summary>
        /// Attempts to transition the thread from its initial state to Running
        /// and start the backing host thread.
        /// </summary>
        /// <returns>
        /// <see cref="KernelResult.Success"/> when started,
        /// <see cref="KernelResult.InvalidState"/> if already started,
        /// <see cref="KernelResult.ThreadTerminating"/> if termination was requested first.
        /// </returns>
        public KernelResult Start()
        {
            // Until the kernel finishes initializing, new threads start force-paused.
            if (!KernelContext.KernelInitialized)
            {
                KernelContext.CriticalSection.Enter();

                if (!TerminationRequested)
                {
                    _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;

                    CombineForcePauseFlags();
                }

                KernelContext.CriticalSection.Leave();
            }

            KernelResult result = KernelResult.ThreadTerminating;

            KernelContext.CriticalSection.Enter();

            if (!ShallBeTerminated)
            {
                KThread currentThread = KernelStatic.GetCurrentThread();

                while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
                {
                    // Low nibble non-zero means the thread was already started.
                    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
                    {
                        result = KernelResult.InvalidState;

                        break;
                    }

                    if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
                    {
                        // Apply any pause flags queued on the new thread before it runs.
                        if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                        {
                            CombineForcePauseFlags();
                        }

                        SetNewSchedFlags(ThreadSchedState.Running);

                        StartHostThread();

                        result = KernelResult.Success;

                        break;
                    }
                    else
                    {
                        // The calling thread itself is force-paused: apply its pause, then
                        // briefly leave the critical section so the pause can take effect,
                        // re-enter and retry.
                        currentThread.CombineForcePauseFlags();

                        KernelContext.CriticalSection.Leave();
                        KernelContext.CriticalSection.Enter();

                        if (currentThread.ShallBeTerminated)
                        {
                            break;
                        }
                    }
                }
            }

            KernelContext.CriticalSection.Leave();

            return result;
        }
        /// <summary>
        /// Flags the thread for termination, forcing it out of any wait or pause state.
        /// </summary>
        /// <returns>The low (run state) nibble of the scheduling flags after the request</returns>
        public ThreadSchedState PrepareForTermination()
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState result;

            // Only the first termination request performs the transition (atomic 0 -> 1).
            if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
            {
                if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
                {
                    // Never started: go straight to termination pending.
                    SchedFlags = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        // Clear the explicit pause so the thread can run to its exit.
                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                        ThreadSchedState oldSchedFlags = SchedFlags;

                        SchedFlags &= ThreadSchedState.LowMask;

                        AdjustScheduling(oldSchedFlags);
                    }

                    // Raise priority (lower value = higher priority) so it terminates promptly.
                    if (BasePriority >= 0x10)
                    {
                        SetPriority(0xF);
                    }

                    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
                    {
                        // TODO: GIC distributor stuffs (sgir changes ect)
                        Context.RequestInterrupt();
                    }

                    // Fail any in-progress synchronization and wake the thread.
                    SignaledObj = null;
                    ObjSyncResult = KernelResult.ThreadTerminating;

                    ReleaseAndResume();
                }
            }

            result = SchedFlags;

            KernelContext.CriticalSection.Leave();

            return result & ThreadSchedState.LowMask;
        }
  250. public void Terminate()
  251. {
  252. ThreadSchedState state = PrepareForTermination();
  253. if (state != ThreadSchedState.TerminationPending)
  254. {
  255. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  256. }
  257. }
        /// <summary>
        /// Post-syscall hook: exits if termination was requested, otherwise re-applies
        /// any pending force-pause before returning to guest code.
        /// </summary>
        public void HandlePostSyscall()
        {
            ThreadSchedState state;

            do
            {
                if (TerminationRequested)
                {
                    Exit();

                    // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
                    break;
                }

                KernelContext.CriticalSection.Enter();

                if (TerminationRequested)
                {
                    // Termination arrived between the unlocked check above and taking
                    // the lock; loop so the Exit path runs.
                    state = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        CombineForcePauseFlags();
                    }

                    state = ThreadSchedState.Running;
                }

                KernelContext.CriticalSection.Leave();
            } while (state == ThreadSchedState.TerminationPending);
        }
        /// <summary>
        /// Exits the thread: releases its resource-limit slot, clears force-pause
        /// flags, marks it terminated and stops guest execution.
        /// </summary>
        public void Exit()
        {
            // TODO: Debug event.

            if (Owner != null)
            {
                Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);

                // Remember the slot was already released so Destroy doesn't release it again.
                _hasBeenReleased = true;
            }

            KernelContext.CriticalSection.Enter();

            _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;

            // ExitImpl returns true only on the first exit, so the reference
            // below is dropped exactly once.
            bool decRef = ExitImpl();

            Context.StopRunning();

            KernelContext.CriticalSection.Leave();

            if (decRef)
            {
                DecrementReferenceCount();
            }
        }
  303. private bool ExitImpl()
  304. {
  305. KernelContext.CriticalSection.Enter();
  306. SetNewSchedFlags(ThreadSchedState.TerminationPending);
  307. bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;
  308. Signal();
  309. KernelContext.CriticalSection.Leave();
  310. return decRef;
  311. }
  312. public KernelResult Sleep(long timeout)
  313. {
  314. KernelContext.CriticalSection.Enter();
  315. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  316. {
  317. KernelContext.CriticalSection.Leave();
  318. return KernelResult.ThreadTerminating;
  319. }
  320. SetNewSchedFlags(ThreadSchedState.Paused);
  321. if (timeout > 0)
  322. {
  323. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  324. }
  325. KernelContext.CriticalSection.Leave();
  326. if (timeout > 0)
  327. {
  328. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  329. }
  330. return 0;
  331. }
  332. public void SetPriority(int priority)
  333. {
  334. KernelContext.CriticalSection.Enter();
  335. BasePriority = priority;
  336. UpdatePriorityInheritance();
  337. KernelContext.CriticalSection.Leave();
  338. }
        /// <summary>
        /// Pauses or unpauses the thread through the thread pause flag.
        /// </summary>
        /// <param name="pause">True to pause, false to unpause</param>
        /// <returns><see cref="KernelResult.Success"/>, or <see cref="KernelResult.InvalidState"/> if the request does not match the current state</returns>
        public KernelResult SetActivity(bool pause)
        {
            KernelResult result = KernelResult.Success;

            KernelContext.CriticalSection.Enter();

            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

            // Only paused or running threads may change activity.
            if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
            {
                KernelContext.CriticalSection.Leave();

                return KernelResult.InvalidState;
            }

            // Deliberate nested enter: the critical section is entered a second time
            // here and left twice at the bottom, so it must be reentrant.
            KernelContext.CriticalSection.Enter();

            if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
            {
                if (pause)
                {
                    // Pause, the force pause flag should be clear (thread is NOT paused).
                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                    {
                        _forcePauseFlags |= ThreadSchedState.ThreadPauseFlag;

                        CombineForcePauseFlags();
                    }
                    else
                    {
                        result = KernelResult.InvalidState;
                    }
                }
                else
                {
                    // Unpause, the force pause flag should be set (thread is paused).
                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                    {
                        ThreadSchedState oldForcePauseFlags = _forcePauseFlags;

                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                        // If no other pause source remains, restore the run state and reschedule.
                        if ((oldForcePauseFlags & ~ThreadSchedState.ThreadPauseFlag) == ThreadSchedState.None)
                        {
                            ThreadSchedState oldSchedFlags = SchedFlags;

                            SchedFlags &= ThreadSchedState.LowMask;

                            AdjustScheduling(oldSchedFlags);
                        }
                    }
                    else
                    {
                        result = KernelResult.InvalidState;
                    }
                }
            }

            KernelContext.CriticalSection.Leave();
            KernelContext.CriticalSection.Leave();

            return result;
        }
        /// <summary>
        /// Cancels an in-progress synchronization wait on this thread.
        /// </summary>
        public void CancelSynchronization()
        {
            KernelContext.CriticalSection.Enter();

            // Not currently paused in a sync wait: just latch the flag for the next wait.
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
            {
                SyncCancelled = true;
            }
            else if (Withholder != null)
            {
                // Waiting while withheld: unlink from the withholder list and resume.
                Withholder.Remove(WithholderNode);

                SetNewSchedFlags(ThreadSchedState.Running);

                Withholder = null;

                SyncCancelled = true;
            }
            else
            {
                // Actively waiting: fail the wait with Cancelled and resume.
                SignaledObj = null;
                ObjSyncResult = KernelResult.Cancelled;

                SetNewSchedFlags(ThreadSchedState.Running);

                SyncCancelled = false;
            }

            KernelContext.CriticalSection.Leave();
        }
  412. public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
  413. {
  414. KernelContext.CriticalSection.Enter();
  415. bool useOverride = _affinityOverrideCount != 0;
  416. // The value -3 is "do not change the preferred core".
  417. if (newCore == -3)
  418. {
  419. newCore = useOverride ? _preferredCoreOverride : PreferredCore;
  420. if ((newAffinityMask & (1 << newCore)) == 0)
  421. {
  422. KernelContext.CriticalSection.Leave();
  423. return KernelResult.InvalidCombination;
  424. }
  425. }
  426. if (useOverride)
  427. {
  428. _preferredCoreOverride = newCore;
  429. _affinityMaskOverride = newAffinityMask;
  430. }
  431. else
  432. {
  433. long oldAffinityMask = AffinityMask;
  434. PreferredCore = newCore;
  435. AffinityMask = newAffinityMask;
  436. if (oldAffinityMask != newAffinityMask)
  437. {
  438. int oldCore = ActiveCore;
  439. if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
  440. {
  441. if (PreferredCore < 0)
  442. {
  443. ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
  444. }
  445. else
  446. {
  447. ActiveCore = PreferredCore;
  448. }
  449. }
  450. AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
  451. }
  452. }
  453. KernelContext.CriticalSection.Leave();
  454. return KernelResult.Success;
  455. }
  456. private void CombineForcePauseFlags()
  457. {
  458. ThreadSchedState oldFlags = SchedFlags;
  459. ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
  460. SchedFlags = lowNibble | _forcePauseFlags;
  461. AdjustScheduling(oldFlags);
  462. }
  463. private void SetNewSchedFlags(ThreadSchedState newFlags)
  464. {
  465. KernelContext.CriticalSection.Enter();
  466. ThreadSchedState oldFlags = SchedFlags;
  467. SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;
  468. if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
  469. {
  470. AdjustScheduling(oldFlags);
  471. }
  472. KernelContext.CriticalSection.Leave();
  473. }
  474. public void ReleaseAndResume()
  475. {
  476. KernelContext.CriticalSection.Enter();
  477. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
  478. {
  479. if (Withholder != null)
  480. {
  481. Withholder.Remove(WithholderNode);
  482. SetNewSchedFlags(ThreadSchedState.Running);
  483. Withholder = null;
  484. }
  485. else
  486. {
  487. SetNewSchedFlags(ThreadSchedState.Running);
  488. }
  489. }
  490. KernelContext.CriticalSection.Leave();
  491. }
  492. public void Reschedule(ThreadSchedState newFlags)
  493. {
  494. KernelContext.CriticalSection.Enter();
  495. ThreadSchedState oldFlags = SchedFlags;
  496. SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
  497. (newFlags & ThreadSchedState.LowMask);
  498. AdjustScheduling(oldFlags);
  499. KernelContext.CriticalSection.Leave();
  500. }
        /// <summary>
        /// Registers <paramref name="requester"/> as waiting on a mutex held by this
        /// thread and re-evaluates priority inheritance.
        /// </summary>
        public void AddMutexWaiter(KThread requester)
        {
            AddToMutexWaitersList(requester);

            requester.MutexOwner = this;

            UpdatePriorityInheritance();
        }
  507. public void RemoveMutexWaiter(KThread thread)
  508. {
  509. if (thread._mutexWaiterNode?.List != null)
  510. {
  511. _mutexWaiters.Remove(thread._mutexWaiterNode);
  512. }
  513. thread.MutexOwner = null;
  514. UpdatePriorityInheritance();
  515. }
        /// <summary>
        /// Transfers ownership of the mutex at <paramref name="mutexAddress"/> to the
        /// first (highest priority) waiter, moving the remaining waiters of that same
        /// mutex onto the new owner's list.
        /// </summary>
        /// <param name="mutexAddress">Address of the mutex being relinquished</param>
        /// <param name="count">Number of threads that were waiting on this mutex</param>
        /// <returns>The new mutex owner, or null if nothing was waiting on it</returns>
        public KThread RelinquishMutex(ulong mutexAddress, out int count)
        {
            count = 0;

            if (_mutexWaiters.First == null)
            {
                return null;
            }

            KThread newMutexOwner = null;

            LinkedListNode<KThread> currentNode = _mutexWaiters.First;

            do
            {
                // Skip all threads that are not waiting for this mutex.
                while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
                {
                    currentNode = currentNode.Next;
                }

                if (currentNode == null)
                {
                    break;
                }

                // Capture the successor before unlinking the node.
                LinkedListNode<KThread> nextNode = currentNode.Next;

                _mutexWaiters.Remove(currentNode);

                currentNode.Value.MutexOwner = newMutexOwner;

                if (newMutexOwner != null)
                {
                    // New owner was already selected, re-insert on new owner list.
                    newMutexOwner.AddToMutexWaitersList(currentNode.Value);
                }
                else
                {
                    // New owner not selected yet, use current thread.
                    newMutexOwner = currentNode.Value;
                }

                count++;

                currentNode = nextNode;
            }
            while (currentNode != null);

            if (newMutexOwner != null)
            {
                // Both this thread and the new owner may have lost/gained waiters,
                // so both effective priorities are recomputed.
                UpdatePriorityInheritance();

                newMutexOwner.UpdatePriorityInheritance();
            }

            return newMutexOwner;
        }
        // Recomputes this thread's dynamic priority from its base priority and its
        // highest priority mutex waiter, propagating the change up the owner chain.
        private void UpdatePriorityInheritance()
        {
            // If any of the threads waiting for the mutex has
            // higher priority than the current thread, then
            // the current thread inherits that priority.
            int highestPriority = BasePriority;

            if (_mutexWaiters.First != null)
            {
                // The waiters list is sorted by priority (see AddToMutexWaitersList),
                // so the first entry has the lowest value, i.e. the highest priority.
                int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;

                if (waitingDynamicPriority < highestPriority)
                {
                    highestPriority = waitingDynamicPriority;
                }
            }

            if (highestPriority != DynamicPriority)
            {
                int oldPriority = DynamicPriority;

                DynamicPriority = highestPriority;

                AdjustSchedulingForNewPriority(oldPriority);

                if (MutexOwner != null)
                {
                    // Remove and re-insert to ensure proper sorting based on new priority.
                    MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);

                    MutexOwner.AddToMutexWaitersList(this);

                    // Recurse so the owner (and its owner, etc.) inherits as well.
                    MutexOwner.UpdatePriorityInheritance();
                }
            }
        }
  588. private void AddToMutexWaitersList(KThread thread)
  589. {
  590. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  591. int currentPriority = thread.DynamicPriority;
  592. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  593. {
  594. nextPrio = nextPrio.Next;
  595. }
  596. if (nextPrio != null)
  597. {
  598. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  599. }
  600. else
  601. {
  602. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  603. }
  604. }
        // Applies a scheduling flags change to the priority queues; non-schedulable
        // threads are instead run/parked directly through the scheduler wait event.
        private void AdjustScheduling(ThreadSchedState oldFlags)
        {
            if (oldFlags == SchedFlags)
            {
                return;
            }

            if (!IsSchedulable)
            {
                // Ensure our thread is running and we have an event.
                StartHostThread();

                // If the thread is not schedulable, we want to just run or pause
                // it directly as we don't care about priority or the core it is
                // running on in this case.

                if (SchedFlags == ThreadSchedState.Running)
                {
                    _schedulerWaitEvent.Set();
                }
                else
                {
                    _schedulerWaitEvent.Reset();
                }

                return;
            }

            if (oldFlags == ThreadSchedState.Running)
            {
                // Was running, now it's stopped.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
                }

                // Also remove it from the suggestion lists of the other allowed cores.
                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }
            else if (SchedFlags == ThreadSchedState.Running)
            {
                // Was stopped, now it's running.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }

                // Also suggest it on every other core allowed by the affinity mask.
                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }
        // Moves the thread between priority queues after its dynamic priority changed.
        private void AdjustSchedulingForNewPriority(int oldPriority)
        {
            // Only running, schedulable threads live in the priority queues.
            if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            if (ActiveCore >= 0)
            {
                KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
                }
            }

            // Add thread to the new priority queues.
            KThread currentThread = KernelStatic.GetCurrentThread();

            if (ActiveCore >= 0)
            {
                // The currently running thread is prepended (placed at the head of the
                // queue); other threads are appended.
                if (currentThread == this)
                {
                    KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
                }
                else
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }
        // Moves the thread between per-core queues after its affinity mask changed:
        // scheduled on its active core, suggested on every other allowed core.
        private void AdjustSchedulingForNewAffinity(long oldAffinityMask, int oldCore)
        {
            if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((oldAffinityMask >> core) & 1) != 0)
                {
                    if (core == oldCore)
                    {
                        KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }

            // Add thread to the new priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((AffinityMask >> core) & 1) != 0)
                {
                    if (core == ActiveCore)
                    {
                        KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }
  738. public void SetEntryArguments(long argsPtr, int threadHandle)
  739. {
  740. Context.SetX(0, (ulong)argsPtr);
  741. Context.SetX(1, (ulong)threadHandle);
  742. }
  743. public void TimeUp()
  744. {
  745. ReleaseAndResume();
  746. }
  747. public string GetGuestStackTrace()
  748. {
  749. return Owner.Debugger.GetGuestStackTrace(Context);
  750. }
  751. public void PrintGuestStackTrace()
  752. {
  753. Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
  754. }
  755. public void AddCpuTime(long ticks)
  756. {
  757. Interlocked.Add(ref _totalTimeRunning, ticks);
  758. }
        /// <summary>
        /// Lazily creates the scheduler wait event and starts the host thread.
        /// Safe to call concurrently; only the first caller starts the thread.
        /// </summary>
        public void StartHostThread()
        {
            if (_schedulerWaitEvent == null)
            {
                // Created unsignaled: ThreadStart blocks on it until the scheduler
                // releases this thread.
                var schedulerWaitEvent = new ManualResetEvent(false);

                // The caller that wins the exchange publishes the event and starts
                // the host thread; any loser disposes its redundant event.
                if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
                {
                    HostThread.Start();
                }
                else
                {
                    schedulerWaitEvent.Dispose();
                }
            }
        }
        // Host thread entry point: waits until the scheduler releases this thread,
        // then runs either the custom start routine or guest code at _entrypoint.
        private void ThreadStart()
        {
            _schedulerWaitEvent.WaitOne();

            // Associate this host thread with the kernel context and this KThread.
            KernelStatic.SetKernelContext(KernelContext, this);

            if (_customThreadStart != null)
            {
                _customThreadStart();
            }
            else
            {
                Owner.Context.Execute(Context, _entrypoint);
            }

            // Execution finished; tear down the CPU context and the wait event.
            Context.Dispose();
            _schedulerWaitEvent.Dispose();
        }
  789. public void MakeUnschedulable()
  790. {
  791. _forcedUnschedulable = true;
  792. }
  793. public override bool IsSignaled()
  794. {
  795. return _hasExited != 0;
  796. }
        /// <summary>
        /// Releases the thread's kernel resources once its last reference is dropped.
        /// </summary>
        protected override void Destroy()
        {
            if (_hasBeenInitialized)
            {
                FreeResources();

                // NOTE(review): when Owner != null, "released" is always true here, so
                // that branch always releases 0 extra slots — confirm this matches the
                // intended accounting with Exit()'s _hasBeenReleased handling.
                bool released = Owner != null || _hasBeenReleased;

                if (Owner != null)
                {
                    Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);

                    Owner.DecrementReferenceCount();
                }
                else
                {
                    KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                }
            }
        }
        // Detaches the thread from its owner, frees its TLS slot and wakes any
        // threads still waiting on a mutex this thread holds.
        private void FreeResources()
        {
            Owner?.RemoveThread(this);

            // NOTE(review): Owner is dereferenced without a null check here, right after
            // a null-conditional call above; _tlsAddress is only set for user threads
            // (see Initialize), which presumably always have an owner — confirm.
            if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
            {
                throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
            }

            KernelContext.CriticalSection.Enter();

            // Wake up all threads that may be waiting for a mutex being held by this thread.
            foreach (KThread thread in _mutexWaiters)
            {
                thread.MutexOwner = null;
                thread._preferredCoreOverride = 0;
                thread.ObjSyncResult = KernelResult.InvalidState;

                thread.ReleaseAndResume();
            }

            KernelContext.CriticalSection.Leave();

            Owner?.DecrementThreadCountAndTerminateIfZero();
        }
  833. }
  834. }