KThread.cs 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Numerics;
  8. using System.Threading;
  9. namespace Ryujinx.HLE.HOS.Kernel.Threading
  10. {
    class KThread : KSynchronizationObject, IKFutureSchedulerObject
    {
        public const int MaxWaitSyncObjects = 64;

        // Set/reset by the scheduler to run or pause the host thread backing this guest thread.
        private ManualResetEvent _schedulerWaitEvent;

        public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;

        public Thread HostThread { get; private set; }

        // CPU emulator register state for this thread.
        public ARMeilleure.State.ExecutionContext Context { get; private set; }

        public KThreadContext ThreadContext { get; private set; }

        // Effective priority after mutex priority inheritance; BasePriority is the requested one.
        public int DynamicPriority { get; set; }

        // Bit N set means the thread may run on core N.
        public long AffinityMask { get; set; }

        public long ThreadUid { get; private set; }

        // Accumulated via Interlocked.Add in AddCpuTime.
        private long _totalTimeRunning;

        public long TotalTimeRunning => _totalTimeRunning;

        public KSynchronizationObject SignaledObj { get; set; }

        public ulong CondVarAddress { get; set; }

        private ulong _entrypoint;
        private ThreadStart _customThreadStart;
        private bool _forcedUnschedulable;

        // A thread is only managed by the priority queues when it has no custom host entry
        // point and was not explicitly made unschedulable via MakeUnschedulable.
        public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;

        public ulong MutexAddress { get; set; }

        public KProcess Owner { get; private set; }

        private ulong _tlsAddress;

        public ulong TlsAddress => _tlsAddress;

        public KSynchronizationObject[] WaitSyncObjects { get; }
        public int[] WaitSyncHandles { get; }

        public long LastScheduledTime { get; set; }

        public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

        // List (and node) used while the thread's wakeup is being withheld, e.g. during a wait.
        public LinkedList<KThread> Withholder { get; set; }
        public LinkedListNode<KThread> WithholderNode { get; set; }

        public LinkedListNode<KThread> ProcessListNode { get; set; }

        // Threads waiting on a mutex held by this thread, kept sorted by dynamic priority
        // (see AddToMutexWaitersList).
        private LinkedList<KThread> _mutexWaiters;
        private LinkedListNode<KThread> _mutexWaiterNode;

        public KThread MutexOwner { get; private set; }

        public int ThreadHandleForUserMutex { get; set; }

        // Pause reasons OR'ed into the high bits of SchedFlags by CombineForcePauseFlags.
        private ThreadSchedState _forcePauseFlags;

        public KernelResult ObjSyncResult { get; set; }

        public int BasePriority { get; set; }
        public int PreferredCore { get; set; }

        public int CurrentCore { get; set; }
        public int ActiveCore { get; set; }

        // Used instead of the regular values while _affinityOverrideCount != 0.
        private long _affinityMaskOverride;
        private int _preferredCoreOverride;
#pragma warning disable CS0649
        private int _affinityOverrideCount;
#pragma warning restore CS0649

        public ThreadSchedState SchedFlags { get; private set; }

        // int rather than bool so it can be updated with Interlocked operations.
        private int _shallBeTerminated;

        public bool ShallBeTerminated
        {
            get => _shallBeTerminated != 0;
            set => _shallBeTerminated = value ? 1 : 0;
        }

        public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;

        public bool SyncCancelled { get; set; }
        public bool WaitingSync { get; set; }

        // Set once on exit; also used as the "signaled" state (see IsSignaled).
        private int _hasExited;
        private bool _hasBeenInitialized;
        private bool _hasBeenReleased;

        public bool WaitingInArbitration { get; set; }

        public long LastPc { get; set; }
  71. public KThread(KernelContext context) : base(context)
  72. {
  73. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  74. WaitSyncHandles = new int[MaxWaitSyncObjects];
  75. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  76. _mutexWaiters = new LinkedList<KThread>();
  77. }
        /// <summary>
        /// Initializes the thread: scheduling state, CPU execution context and, for user
        /// threads, thread local storage.
        /// </summary>
        /// <param name="entrypoint">Guest address where execution starts.</param>
        /// <param name="argsPtr">Value placed in X0 (first guest argument).</param>
        /// <param name="stackTop">Initial stack pointer value.</param>
        /// <param name="priority">Base (and initial dynamic) priority.</param>
        /// <param name="cpuCore">Preferred/initial core; also OR'ed into the affinity mask.</param>
        /// <param name="owner">Owning process, or null for kernel-internal threads.</param>
        /// <param name="type">Thread type; must be one of the 4 defined values.</param>
        /// <param name="customThreadStart">Optional host entry point; when set the thread is not guest-schedulable.</param>
        /// <returns><see cref="KernelResult.Success"/>, or <see cref="KernelResult.OutOfMemory"/> if TLS allocation fails.</returns>
        /// <exception cref="ArgumentException">Thrown when <paramref name="type"/> is out of range.</exception>
        public KernelResult Initialize(
            ulong entrypoint,
            ulong argsPtr,
            ulong stackTop,
            int priority,
            int cpuCore,
            KProcess owner,
            ThreadType type,
            ThreadStart customThreadStart = null)
        {
            if ((uint)type > 3)
            {
                throw new ArgumentException($"Invalid thread type \"{type}\".");
            }

            ThreadContext = new KThreadContext();

            PreferredCore = cpuCore;
            AffinityMask |= 1L << cpuCore;

            // Dummy threads are considered running from the start; real threads begin unscheduled.
            SchedFlags = type == ThreadType.Dummy
                ? ThreadSchedState.Running
                : ThreadSchedState.None;

            ActiveCore = cpuCore;
            ObjSyncResult = KernelResult.ThreadNotStarted;
            DynamicPriority = priority;
            BasePriority = priority;
            CurrentCore = cpuCore;

            _entrypoint = entrypoint;
            _customThreadStart = customThreadStart;

            if (type == ThreadType.User)
            {
                // NOTE(review): user threads dereference "owner" here, so a null owner is only
                // valid for the non-user thread types — confirm with callers.
                if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
                {
                    return KernelResult.OutOfMemory;
                }

                MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
            }

            bool is64Bits;

            if (owner != null)
            {
                Owner = owner;

                owner.IncrementReferenceCount();
                owner.IncrementThreadCount();

                is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
            }
            else
            {
                // Ownerless (kernel) threads are always treated as 64-bit.
                is64Bits = true;
            }

            HostThread = new Thread(ThreadStart);

            Context = CpuContext.CreateExecutionContext();

            Context.IsAarch32 = !is64Bits;

            Context.SetX(0, argsPtr);

            if (is64Bits)
            {
                // X18: random value with the low bit forced to 1; exact ABI purpose is not
                // visible here — TODO confirm against HOS expectations.
                Context.SetX(18, KSystemControl.GenerateRandom() | 1);
                // Register index 31 is used by the emulator as the stack pointer here.
                Context.SetX(31, stackTop);
            }
            else
            {
                // AArch32: R13 is the stack pointer.
                Context.SetX(13, (uint)stackTop);
            }

            // 19.2 MHz generic timer counter frequency.
            Context.CntfrqEl0 = 19200000;
            Context.Tpidr = (long)_tlsAddress;

            ThreadUid = KernelContext.NewThreadUid();

            HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";

            _hasBeenInitialized = true;

            if (owner != null)
            {
                owner.SubscribeThreadEventHandlers(Context);
                owner.AddThread(this);

                if (owner.IsPaused)
                {
                    KernelContext.CriticalSection.Enter();

                    if (TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();

                        return KernelResult.Success;
                    }

                    // Owner process is paused: make this thread start out force-paused too.
                    _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;

                    CombineForcePauseFlags();

                    KernelContext.CriticalSection.Leave();
                }
            }

            return KernelResult.Success;
        }
        /// <summary>
        /// Transitions the thread to the running state and starts its host thread.
        /// </summary>
        /// <returns>
        /// <see cref="KernelResult.Success"/> on success; <see cref="KernelResult.InvalidState"/> if the
        /// thread was already started; <see cref="KernelResult.ThreadTerminating"/> if this thread or
        /// the calling thread is terminating.
        /// </returns>
        public KernelResult Start()
        {
            // Until the kernel has finished booting, new threads start force-paused and are
            // released when initialization completes.
            if (!KernelContext.KernelInitialized)
            {
                KernelContext.CriticalSection.Enter();

                if (!TerminationRequested)
                {
                    _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;

                    CombineForcePauseFlags();
                }

                KernelContext.CriticalSection.Leave();
            }

            KernelResult result = KernelResult.ThreadTerminating;

            KernelContext.CriticalSection.Enter();

            if (!ShallBeTerminated)
            {
                KThread currentThread = KernelStatic.GetCurrentThread();

                while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
                {
                    // Only a thread that was never scheduled (low nibble == None) may be started.
                    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
                    {
                        result = KernelResult.InvalidState;

                        break;
                    }

                    if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
                    {
                        if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                        {
                            CombineForcePauseFlags();
                        }

                        SetNewSchedFlags(ThreadSchedState.Running);

                        StartHostThread();

                        result = KernelResult.Success;

                        break;
                    }
                    else
                    {
                        // The calling thread has a pending force-pause: apply it, cycle the
                        // critical section (giving the scheduler a chance to pause us), then retry.
                        currentThread.CombineForcePauseFlags();

                        KernelContext.CriticalSection.Leave();
                        KernelContext.CriticalSection.Enter();

                        if (currentThread.ShallBeTerminated)
                        {
                            break;
                        }
                    }
                }
            }

            KernelContext.CriticalSection.Leave();

            return result;
        }
        /// <summary>
        /// Marks the thread for termination and, if it is currently blocked, wakes it up with a
        /// <see cref="KernelResult.ThreadTerminating"/> result.
        /// </summary>
        /// <returns>The thread's scheduling state (low nibble) after the request.</returns>
        public ThreadSchedState PrepareForTermination()
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState result;

            // Only the first termination request performs the work below.
            if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
            {
                if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
                {
                    // Never started: it can go straight to the terminated state.
                    SchedFlags = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        // Clear the thread-pause reason and drop all force-pause bits from the
                        // scheduling flags so the thread can run to its termination point.
                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                        ThreadSchedState oldSchedFlags = SchedFlags;

                        SchedFlags &= ThreadSchedState.LowMask;

                        AdjustScheduling(oldSchedFlags);
                    }

                    // Boost low-priority threads so termination completes promptly.
                    if (BasePriority >= 0x10)
                    {
                        SetPriority(0xF);
                    }

                    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
                    {
                        // TODO: GIC distributor stuffs (sgir changes ect)
                        Context.RequestInterrupt();
                    }

                    // Wake the thread from any wait with a "terminating" sync result.
                    SignaledObj = null;
                    ObjSyncResult = KernelResult.ThreadTerminating;

                    ReleaseAndResume();
                }
            }

            result = SchedFlags;

            KernelContext.CriticalSection.Leave();

            return result & ThreadSchedState.LowMask;
        }
  249. public void Terminate()
  250. {
  251. ThreadSchedState state = PrepareForTermination();
  252. if (state != ThreadSchedState.TerminationPending)
  253. {
  254. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  255. }
  256. }
        /// <summary>
        /// Runs after every syscall: exits the thread if termination was requested, and applies
        /// any pending force-pause state. Loops in case a pause turned into a termination.
        /// </summary>
        public void HandlePostSyscall()
        {
            ThreadSchedState state;

            do
            {
                if (TerminationRequested)
                {
                    Exit();

                    // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
                    break;
                }

                KernelContext.CriticalSection.Enter();

                if (TerminationRequested)
                {
                    state = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        CombineForcePauseFlags();
                    }

                    state = ThreadSchedState.Running;
                }

                KernelContext.CriticalSection.Leave();
            } while (state == ThreadSchedState.TerminationPending);
        }
        /// <summary>
        /// Terminates the thread: releases its resource-limit slot, marks it terminated,
        /// signals waiters and stops the emulated CPU context.
        /// </summary>
        public void Exit()
        {
            // TODO: Debug event.

            if (Owner != null)
            {
                Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);

                _hasBeenReleased = true;
            }

            KernelContext.CriticalSection.Enter();

            _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;

            bool decRef = ExitImpl();

            Context.StopRunning();

            KernelContext.CriticalSection.Leave();

            // Drop the self-reference only on the first exit (see ExitImpl).
            if (decRef)
            {
                DecrementReferenceCount();
            }
        }
  302. private bool ExitImpl()
  303. {
  304. KernelContext.CriticalSection.Enter();
  305. SetNewSchedFlags(ThreadSchedState.TerminationPending);
  306. bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;
  307. Signal();
  308. KernelContext.CriticalSection.Leave();
  309. return decRef;
  310. }
  311. public KernelResult Sleep(long timeout)
  312. {
  313. KernelContext.CriticalSection.Enter();
  314. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  315. {
  316. KernelContext.CriticalSection.Leave();
  317. return KernelResult.ThreadTerminating;
  318. }
  319. SetNewSchedFlags(ThreadSchedState.Paused);
  320. if (timeout > 0)
  321. {
  322. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  323. }
  324. KernelContext.CriticalSection.Leave();
  325. if (timeout > 0)
  326. {
  327. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  328. }
  329. return 0;
  330. }
        /// <summary>
        /// Sets the base priority and re-derives the dynamic priority (priority inheritance).
        /// </summary>
        /// <param name="priority">New base priority value.</param>
        public void SetPriority(int priority)
        {
            KernelContext.CriticalSection.Enter();

            BasePriority = priority;

            UpdatePriorityInheritance();

            KernelContext.CriticalSection.Leave();
        }

        /// <summary>
        /// Adds a force-pause reason and applies it to the scheduling flags.
        /// NOTE(review): callers appear to invoke this while holding the kernel critical
        /// section — confirm, as there is no locking here.
        /// </summary>
        /// <param name="type">Pause reason flag(s) to set.</param>
        public void Suspend(ThreadSchedState type)
        {
            _forcePauseFlags |= type;

            CombineForcePauseFlags();
        }

        /// <summary>
        /// Removes a force-pause reason; if it was the last one, restores the plain
        /// scheduling state and requeues the thread.
        /// </summary>
        /// <param name="type">Pause reason flag(s) to clear.</param>
        public void Resume(ThreadSchedState type)
        {
            ThreadSchedState oldForcePauseFlags = _forcePauseFlags;

            _forcePauseFlags &= ~type;

            // Only unpause when no other force-pause reason remains.
            if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
            {
                ThreadSchedState oldSchedFlags = SchedFlags;

                SchedFlags &= ThreadSchedState.LowMask;

                AdjustScheduling(oldSchedFlags);
            }
        }
        /// <summary>
        /// Pauses or unpauses the thread on behalf of the guest.
        /// </summary>
        /// <param name="pause">True to pause, false to resume.</param>
        /// <returns><see cref="KernelResult.Success"/>, or <see cref="KernelResult.InvalidState"/> when the request does not match the current pause state.</returns>
        public KernelResult SetActivity(bool pause)
        {
            KernelResult result = KernelResult.Success;

            KernelContext.CriticalSection.Enter();

            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

            // Activity can only be changed on a thread that is running or paused.
            if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
            {
                KernelContext.CriticalSection.Leave();

                return KernelResult.InvalidState;
            }

            // NOTE(review): the critical section is entered a second time here (and left twice
            // below), so it is presumably reentrant — confirm against the KCriticalSection
            // implementation.
            KernelContext.CriticalSection.Enter();

            if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
            {
                if (pause)
                {
                    // Pause, the force pause flag should be clear (thread is NOT paused).
                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                    {
                        Suspend(ThreadSchedState.ThreadPauseFlag);
                    }
                    else
                    {
                        result = KernelResult.InvalidState;
                    }
                }
                else
                {
                    // Unpause, the force pause flag should be set (thread is paused).
                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                    {
                        Resume(ThreadSchedState.ThreadPauseFlag);
                    }
                    else
                    {
                        result = KernelResult.InvalidState;
                    }
                }
            }

            KernelContext.CriticalSection.Leave();
            KernelContext.CriticalSection.Leave();

            return result;
        }
        /// <summary>
        /// Handles a synchronization cancellation request: wakes the thread with
        /// <see cref="KernelResult.Cancelled"/> if it is blocked in a sync wait, otherwise
        /// latches the cancellation in <see cref="SyncCancelled"/> for the next wait.
        /// </summary>
        public void CancelSynchronization()
        {
            KernelContext.CriticalSection.Enter();

            // Not paused in a sync wait: just remember that a cancel was requested.
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
            {
                SyncCancelled = true;
            }
            else if (Withholder != null)
            {
                // Wakeup is being withheld: remove from the withholder list and resume.
                Withholder.Remove(WithholderNode);

                SetNewSchedFlags(ThreadSchedState.Running);

                Withholder = null;

                SyncCancelled = true;
            }
            else
            {
                // Actively waiting: cancel the wait itself and resume with a Cancelled result.
                SignaledObj = null;
                ObjSyncResult = KernelResult.Cancelled;

                SetNewSchedFlags(ThreadSchedState.Running);

                SyncCancelled = false;
            }

            KernelContext.CriticalSection.Leave();
        }
  419. public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
  420. {
  421. KernelContext.CriticalSection.Enter();
  422. bool useOverride = _affinityOverrideCount != 0;
  423. // The value -3 is "do not change the preferred core".
  424. if (newCore == -3)
  425. {
  426. newCore = useOverride ? _preferredCoreOverride : PreferredCore;
  427. if ((newAffinityMask & (1 << newCore)) == 0)
  428. {
  429. KernelContext.CriticalSection.Leave();
  430. return KernelResult.InvalidCombination;
  431. }
  432. }
  433. if (useOverride)
  434. {
  435. _preferredCoreOverride = newCore;
  436. _affinityMaskOverride = newAffinityMask;
  437. }
  438. else
  439. {
  440. long oldAffinityMask = AffinityMask;
  441. PreferredCore = newCore;
  442. AffinityMask = newAffinityMask;
  443. if (oldAffinityMask != newAffinityMask)
  444. {
  445. int oldCore = ActiveCore;
  446. if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
  447. {
  448. if (PreferredCore < 0)
  449. {
  450. ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
  451. }
  452. else
  453. {
  454. ActiveCore = PreferredCore;
  455. }
  456. }
  457. AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
  458. }
  459. }
  460. KernelContext.CriticalSection.Leave();
  461. return KernelResult.Success;
  462. }
        /// <summary>
        /// Merges the pending force-pause reasons into the scheduling flags (keeping the low
        /// nibble state) and requeues the thread accordingly.
        /// </summary>
        private void CombineForcePauseFlags()
        {
            ThreadSchedState oldFlags = SchedFlags;
            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

            SchedFlags = lowNibble | _forcePauseFlags;

            AdjustScheduling(oldFlags);
        }

        /// <summary>
        /// Replaces the low-nibble scheduling state, preserving the high (force-pause) bits,
        /// and requeues the thread if the state actually changed.
        /// </summary>
        /// <param name="newFlags">New low-nibble state.</param>
        private void SetNewSchedFlags(ThreadSchedState newFlags)
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState oldFlags = SchedFlags;

            SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;

            if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
            {
                AdjustScheduling(oldFlags);
            }

            KernelContext.CriticalSection.Leave();
        }
  481. public void ReleaseAndResume()
  482. {
  483. KernelContext.CriticalSection.Enter();
  484. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
  485. {
  486. if (Withholder != null)
  487. {
  488. Withholder.Remove(WithholderNode);
  489. SetNewSchedFlags(ThreadSchedState.Running);
  490. Withholder = null;
  491. }
  492. else
  493. {
  494. SetNewSchedFlags(ThreadSchedState.Running);
  495. }
  496. }
  497. KernelContext.CriticalSection.Leave();
  498. }
        /// <summary>
        /// Replaces the low-nibble scheduling state (keeping force-pause bits) and requeues
        /// the thread unconditionally.
        /// </summary>
        /// <param name="newFlags">New state; only the low nibble is used.</param>
        public void Reschedule(ThreadSchedState newFlags)
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState oldFlags = SchedFlags;

            SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
                (newFlags & ThreadSchedState.LowMask);

            AdjustScheduling(oldFlags);

            KernelContext.CriticalSection.Leave();
        }

        /// <summary>
        /// Registers a thread as waiting on a mutex held by this thread and propagates
        /// priority inheritance.
        /// </summary>
        /// <param name="requester">Thread that is now waiting on us.</param>
        public void AddMutexWaiter(KThread requester)
        {
            AddToMutexWaitersList(requester);

            requester.MutexOwner = this;

            UpdatePriorityInheritance();
        }

        /// <summary>
        /// Removes a thread from this thread's mutex waiters and re-derives the priority.
        /// </summary>
        /// <param name="thread">Thread that stopped waiting.</param>
        public void RemoveMutexWaiter(KThread thread)
        {
            // The node may already be detached; only remove it if it is still linked.
            if (thread._mutexWaiterNode?.List != null)
            {
                _mutexWaiters.Remove(thread._mutexWaiterNode);
            }

            thread.MutexOwner = null;

            UpdatePriorityInheritance();
        }
        /// <summary>
        /// Releases a mutex held by this thread: picks the first waiter on that address as the
        /// new owner and moves every other waiter on the same address to the new owner's list.
        /// </summary>
        /// <param name="mutexAddress">Guest address of the mutex being released.</param>
        /// <param name="count">Outputs the number of threads that were waiting on the mutex.</param>
        /// <returns>The new owner thread, or null if no thread was waiting on this address.</returns>
        public KThread RelinquishMutex(ulong mutexAddress, out int count)
        {
            count = 0;

            if (_mutexWaiters.First == null)
            {
                return null;
            }

            KThread newMutexOwner = null;

            LinkedListNode<KThread> currentNode = _mutexWaiters.First;

            do
            {
                // Skip all threads that are not waiting for this mutex.
                while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
                {
                    currentNode = currentNode.Next;
                }

                if (currentNode == null)
                {
                    break;
                }

                // Capture the successor before unlinking the node.
                LinkedListNode<KThread> nextNode = currentNode.Next;

                _mutexWaiters.Remove(currentNode);

                currentNode.Value.MutexOwner = newMutexOwner;

                if (newMutexOwner != null)
                {
                    // New owner was already selected, re-insert on new owner list.
                    newMutexOwner.AddToMutexWaitersList(currentNode.Value);
                }
                else
                {
                    // New owner not selected yet, use current thread.
                    newMutexOwner = currentNode.Value;
                }

                count++;

                currentNode = nextNode;
            }
            while (currentNode != null);

            if (newMutexOwner != null)
            {
                // Re-derive priorities: this thread lost waiters, the new owner gained them.
                UpdatePriorityInheritance();

                newMutexOwner.UpdatePriorityInheritance();
            }

            return newMutexOwner;
        }
        /// <summary>
        /// Recomputes this thread's dynamic priority from its base priority and its highest
        /// priority mutex waiter, propagating any change up the chain of mutex owners.
        /// </summary>
        private void UpdatePriorityInheritance()
        {
            // If any of the threads waiting for the mutex has
            // higher priority than the current thread, then
            // the current thread inherits that priority.
            int highestPriority = BasePriority;

            if (_mutexWaiters.First != null)
            {
                // The waiters list is sorted, so the first entry has the highest priority
                // (lowest numeric value).
                int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;

                if (waitingDynamicPriority < highestPriority)
                {
                    highestPriority = waitingDynamicPriority;
                }
            }

            if (highestPriority != DynamicPriority)
            {
                int oldPriority = DynamicPriority;

                DynamicPriority = highestPriority;

                AdjustSchedulingForNewPriority(oldPriority);

                if (MutexOwner != null)
                {
                    // Remove and re-insert to ensure proper sorting based on new priority.
                    MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);

                    MutexOwner.AddToMutexWaitersList(this);

                    // Recurse: our owner may inherit the new priority as well.
                    MutexOwner.UpdatePriorityInheritance();
                }
            }
        }
  595. private void AddToMutexWaitersList(KThread thread)
  596. {
  597. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  598. int currentPriority = thread.DynamicPriority;
  599. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  600. {
  601. nextPrio = nextPrio.Next;
  602. }
  603. if (nextPrio != null)
  604. {
  605. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  606. }
  607. else
  608. {
  609. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  610. }
  611. }
        /// <summary>
        /// Synchronizes the priority queues (or, for unschedulable threads, the host wait
        /// event) with a scheduling-state transition.
        /// </summary>
        /// <param name="oldFlags">Scheduling flags before the change.</param>
        private void AdjustScheduling(ThreadSchedState oldFlags)
        {
            if (oldFlags == SchedFlags)
            {
                return;
            }

            if (!IsSchedulable)
            {
                // Only threads with a custom host entry point reach this path (a forced
                // unschedulable thread is excluded by the inner check).
                if (!_forcedUnschedulable)
                {
                    // Ensure our thread is running and we have an event.
                    StartHostThread();

                    // If the thread is not schedulable, we want to just run or pause
                    // it directly as we don't care about priority or the core it is
                    // running on in this case.
                    if (SchedFlags == ThreadSchedState.Running)
                    {
                        _schedulerWaitEvent.Set();
                    }
                    else
                    {
                        _schedulerWaitEvent.Reset();
                    }
                }

                return;
            }

            if (oldFlags == ThreadSchedState.Running)
            {
                // Was running, now it's stopped.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
                }

                // Also drop the "suggested" entries on every other allowed core.
                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }
            else if (SchedFlags == ThreadSchedState.Running)
            {
                // Was stopped, now it's running.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }

                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Moves the thread between priority queues after a dynamic priority change.
        /// </summary>
        /// <param name="oldPriority">Dynamic priority before the change.</param>
        private void AdjustSchedulingForNewPriority(int oldPriority)
        {
            // Only a running, schedulable thread lives in the priority queues.
            if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            if (ActiveCore >= 0)
            {
                KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
                }
            }

            // Add thread to the new priority queues.
            KThread currentThread = KernelStatic.GetCurrentThread();

            if (ActiveCore >= 0)
            {
                // The current thread is prepended so it keeps running at its new priority.
                if (currentThread == this)
                {
                    KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
                }
                else
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Moves the thread between per-core priority queues after an affinity mask change.
        /// </summary>
        /// <param name="oldAffinityMask">Affinity mask before the change.</param>
        /// <param name="oldCore">Active core before the change.</param>
        private void AdjustSchedulingForNewAffinity(long oldAffinityMask, int oldCore)
        {
            if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((oldAffinityMask >> core) & 1) != 0)
                {
                    // Scheduled on its active core, merely suggested everywhere else.
                    if (core == oldCore)
                    {
                        KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }

            // Add thread to the new priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((AffinityMask >> core) & 1) != 0)
                {
                    if (core == ActiveCore)
                    {
                        KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Places the guest entry arguments in X0/X1 (argument pointer and thread handle).
        /// </summary>
        /// <param name="argsPtr">Value for X0.</param>
        /// <param name="threadHandle">Value for X1.</param>
        public void SetEntryArguments(long argsPtr, int threadHandle)
        {
            Context.SetX(0, (ulong)argsPtr);
            Context.SetX(1, (ulong)threadHandle);
        }

        /// <summary>
        /// IKFutureSchedulerObject callback: fired by the time manager when a scheduled
        /// timeout elapses; wakes the thread if it is paused.
        /// </summary>
        public void TimeUp()
        {
            ReleaseAndResume();
        }

        /// <summary>
        /// Returns the guest stack trace for this thread via the owner process debugger.
        /// </summary>
        public string GetGuestStackTrace()
        {
            return Owner.Debugger.GetGuestStackTrace(this);
        }

        /// <summary>
        /// Returns a printout of the guest CPU registers via the owner process debugger.
        /// </summary>
        public string GetGuestRegisterPrintout()
        {
            return Owner.Debugger.GetCpuRegisterPrintout(this);
        }

        /// <summary>
        /// Logs the guest stack trace at Info level.
        /// </summary>
        public void PrintGuestStackTrace()
        {
            Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
        }

        /// <summary>
        /// Logs the guest CPU register printout at Info level.
        /// </summary>
        public void PrintGuestRegisterPrintout()
        {
            Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n");
        }

        /// <summary>
        /// Atomically accumulates CPU time into <see cref="TotalTimeRunning"/>.
        /// </summary>
        /// <param name="ticks">Ticks to add.</param>
        public void AddCpuTime(long ticks)
        {
            Interlocked.Add(ref _totalTimeRunning, ticks);
        }
        /// <summary>
        /// Lazily creates the scheduler wait event and starts the host thread exactly once.
        /// Safe to call concurrently: the Interlocked.Exchange guarantees only one caller
        /// installs the event and starts the thread; losers dispose their event.
        /// </summary>
        public void StartHostThread()
        {
            if (_schedulerWaitEvent == null)
            {
                // Created unsignaled: ThreadStart blocks on it until the scheduler sets it.
                var schedulerWaitEvent = new ManualResetEvent(false);

                if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
                {
                    HostThread.Start();
                }
                else
                {
                    // Another caller won the race; discard our event.
                    schedulerWaitEvent.Dispose();
                }
            }
        }
        /// <summary>
        /// Host thread entry point: waits for the scheduler's go-ahead, binds the kernel
        /// context, then runs either the custom host start or guest code at the entrypoint.
        /// Disposes the execution context and wait event when execution finishes.
        /// </summary>
        private void ThreadStart()
        {
            // Block until the scheduler (or AdjustScheduling for unschedulable threads)
            // signals that this thread may run.
            _schedulerWaitEvent.WaitOne();

            KernelStatic.SetKernelContext(KernelContext, this);

            if (_customThreadStart != null)
            {
                _customThreadStart();
            }
            else
            {
                // Runs guest code; returns when the context is stopped (see Exit).
                Owner.Context.Execute(Context, _entrypoint);
            }

            Context.Dispose();
            _schedulerWaitEvent.Dispose();
        }
        /// <summary>
        /// Permanently excludes the thread from guest scheduling (see <see cref="IsSchedulable"/>).
        /// </summary>
        public void MakeUnschedulable()
        {
            _forcedUnschedulable = true;
        }

        /// <summary>
        /// KSynchronizationObject: the thread is signaled once it has exited.
        /// </summary>
        public override bool IsSignaled()
        {
            return _hasExited != 0;
        }
        /// <summary>
        /// Final cleanup when the last reference is dropped: frees resources and returns the
        /// thread slot to the owner's (or kernel's) resource limit.
        /// </summary>
        protected override void Destroy()
        {
            if (_hasBeenInitialized)
            {
                FreeResources();

                // NOTE(review): "released" is true for any owned thread, not just when Exit
                // ran (_hasBeenReleased) — presumably intentional so owned threads never
                // double-release the count here; confirm against KResourceLimit semantics.
                bool released = Owner != null || _hasBeenReleased;

                if (Owner != null)
                {
                    Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);

                    Owner.DecrementReferenceCount();
                }
                else
                {
                    KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                }
            }
        }
        /// <summary>
        /// Detaches the thread from its owner, frees its TLS slot and wakes any threads still
        /// waiting on mutexes held by this thread.
        /// </summary>
        /// <exception cref="InvalidOperationException">Thrown when freeing TLS fails.</exception>
        private void FreeResources()
        {
            Owner?.RemoveThread(this);

            // _tlsAddress is only non-zero for user threads, which always have an owner
            // (allocated in Initialize), so the dereference is safe behind the first check.
            if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
            {
                throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
            }

            KernelContext.CriticalSection.Enter();

            // Wake up all threads that may be waiting for a mutex being held by this thread.
            foreach (KThread thread in _mutexWaiters)
            {
                thread.MutexOwner = null;
                thread._preferredCoreOverride = 0;
                thread.ObjSyncResult = KernelResult.InvalidState;

                thread.ReleaseAndResume();
            }

            KernelContext.CriticalSection.Leave();

            Owner?.DecrementThreadCountAndTerminateIfZero();
        }
  851. }
  852. }