// KThread.cs — kernel thread object for the Ryujinx HLE kernel (Horizon OS reimplementation).
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Numerics;
  8. using System.Threading;
  9. namespace Ryujinx.HLE.HOS.Kernel.Threading
  10. {
  11. class KThread : KSynchronizationObject, IKFutureSchedulerObject
  12. {
  13. public const int MaxWaitSyncObjects = 64;
  14. private ManualResetEvent _schedulerWaitEvent;
  15. public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;
  16. public Thread HostThread { get; private set; }
  17. public ARMeilleure.State.ExecutionContext Context { get; private set; }
  18. public KThreadContext ThreadContext { get; private set; }
  19. public int DynamicPriority { get; set; }
  20. public long AffinityMask { get; set; }
  21. public long ThreadUid { get; private set; }
  22. private long _totalTimeRunning;
  23. public long TotalTimeRunning => _totalTimeRunning;
  24. public KSynchronizationObject SignaledObj { get; set; }
  25. public ulong CondVarAddress { get; set; }
  26. private ulong _entrypoint;
  27. private ThreadStart _customThreadStart;
  28. private bool _forcedUnschedulable;
  29. public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;
  30. public ulong MutexAddress { get; set; }
  31. public KProcess Owner { get; private set; }
  32. private ulong _tlsAddress;
  33. public ulong TlsAddress => _tlsAddress;
  34. public KSynchronizationObject[] WaitSyncObjects { get; }
  35. public int[] WaitSyncHandles { get; }
  36. public long LastScheduledTime { get; set; }
  37. public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }
  38. public LinkedList<KThread> Withholder { get; set; }
  39. public LinkedListNode<KThread> WithholderNode { get; set; }
  40. public LinkedListNode<KThread> ProcessListNode { get; set; }
  41. private LinkedList<KThread> _mutexWaiters;
  42. private LinkedListNode<KThread> _mutexWaiterNode;
  43. public KThread MutexOwner { get; private set; }
  44. public int ThreadHandleForUserMutex { get; set; }
  45. private ThreadSchedState _forcePauseFlags;
  46. public KernelResult ObjSyncResult { get; set; }
  47. public int BasePriority { get; set; }
  48. public int PreferredCore { get; set; }
  49. public int CurrentCore { get; set; }
  50. public int ActiveCore { get; set; }
  51. private long _affinityMaskOverride;
  52. private int _preferredCoreOverride;
  53. #pragma warning disable CS0649
  54. private int _affinityOverrideCount;
  55. #pragma warning restore CS0649
  56. public ThreadSchedState SchedFlags { get; private set; }
  57. private int _shallBeTerminated;
  58. public bool ShallBeTerminated
  59. {
  60. get => _shallBeTerminated != 0;
  61. set => _shallBeTerminated = value ? 1 : 0;
  62. }
  63. public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;
  64. public bool SyncCancelled { get; set; }
  65. public bool WaitingSync { get; set; }
  66. private int _hasExited;
  67. private bool _hasBeenInitialized;
  68. private bool _hasBeenReleased;
  69. public bool WaitingInArbitration { get; set; }
  70. public long LastPc { get; set; }
  71. public KThread(KernelContext context) : base(context)
  72. {
  73. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  74. WaitSyncHandles = new int[MaxWaitSyncObjects];
  75. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  76. _mutexWaiters = new LinkedList<KThread>();
  77. }
  78. public KernelResult Initialize(
  79. ulong entrypoint,
  80. ulong argsPtr,
  81. ulong stackTop,
  82. int priority,
  83. int cpuCore,
  84. KProcess owner,
  85. ThreadType type,
  86. ThreadStart customThreadStart = null)
  87. {
  88. if ((uint)type > 3)
  89. {
  90. throw new ArgumentException($"Invalid thread type \"{type}\".");
  91. }
  92. ThreadContext = new KThreadContext();
  93. PreferredCore = cpuCore;
  94. AffinityMask |= 1L << cpuCore;
  95. SchedFlags = type == ThreadType.Dummy
  96. ? ThreadSchedState.Running
  97. : ThreadSchedState.None;
  98. ActiveCore = cpuCore;
  99. ObjSyncResult = KernelResult.ThreadNotStarted;
  100. DynamicPriority = priority;
  101. BasePriority = priority;
  102. CurrentCore = cpuCore;
  103. _entrypoint = entrypoint;
  104. _customThreadStart = customThreadStart;
  105. if (type == ThreadType.User)
  106. {
  107. if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
  108. {
  109. return KernelResult.OutOfMemory;
  110. }
  111. MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
  112. }
  113. bool is64Bits;
  114. if (owner != null)
  115. {
  116. Owner = owner;
  117. owner.IncrementReferenceCount();
  118. owner.IncrementThreadCount();
  119. is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
  120. }
  121. else
  122. {
  123. is64Bits = true;
  124. }
  125. HostThread = new Thread(ThreadStart);
  126. Context = CpuContext.CreateExecutionContext();
  127. Context.IsAarch32 = !is64Bits;
  128. Context.SetX(0, argsPtr);
  129. if (is64Bits)
  130. {
  131. Context.SetX(31, stackTop);
  132. }
  133. else
  134. {
  135. Context.SetX(13, (uint)stackTop);
  136. }
  137. Context.CntfrqEl0 = 19200000;
  138. Context.Tpidr = (long)_tlsAddress;
  139. ThreadUid = KernelContext.NewThreadUid();
  140. HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";
  141. _hasBeenInitialized = true;
  142. if (owner != null)
  143. {
  144. owner.SubscribeThreadEventHandlers(Context);
  145. owner.AddThread(this);
  146. if (owner.IsPaused)
  147. {
  148. KernelContext.CriticalSection.Enter();
  149. if (TerminationRequested)
  150. {
  151. KernelContext.CriticalSection.Leave();
  152. return KernelResult.Success;
  153. }
  154. _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
  155. CombineForcePauseFlags();
  156. KernelContext.CriticalSection.Leave();
  157. }
  158. }
  159. return KernelResult.Success;
  160. }
  161. public KernelResult Start()
  162. {
  163. if (!KernelContext.KernelInitialized)
  164. {
  165. KernelContext.CriticalSection.Enter();
  166. if (!TerminationRequested)
  167. {
  168. _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;
  169. CombineForcePauseFlags();
  170. }
  171. KernelContext.CriticalSection.Leave();
  172. }
  173. KernelResult result = KernelResult.ThreadTerminating;
  174. KernelContext.CriticalSection.Enter();
  175. if (!ShallBeTerminated)
  176. {
  177. KThread currentThread = KernelStatic.GetCurrentThread();
  178. while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
  179. {
  180. if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
  181. {
  182. result = KernelResult.InvalidState;
  183. break;
  184. }
  185. if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
  186. {
  187. if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
  188. {
  189. CombineForcePauseFlags();
  190. }
  191. SetNewSchedFlags(ThreadSchedState.Running);
  192. StartHostThread();
  193. result = KernelResult.Success;
  194. break;
  195. }
  196. else
  197. {
  198. currentThread.CombineForcePauseFlags();
  199. KernelContext.CriticalSection.Leave();
  200. KernelContext.CriticalSection.Enter();
  201. if (currentThread.ShallBeTerminated)
  202. {
  203. break;
  204. }
  205. }
  206. }
  207. }
  208. KernelContext.CriticalSection.Leave();
  209. return result;
  210. }
  211. public ThreadSchedState PrepareForTermination()
  212. {
  213. KernelContext.CriticalSection.Enter();
  214. ThreadSchedState result;
  215. if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
  216. {
  217. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
  218. {
  219. SchedFlags = ThreadSchedState.TerminationPending;
  220. }
  221. else
  222. {
  223. if (_forcePauseFlags != ThreadSchedState.None)
  224. {
  225. _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;
  226. ThreadSchedState oldSchedFlags = SchedFlags;
  227. SchedFlags &= ThreadSchedState.LowMask;
  228. AdjustScheduling(oldSchedFlags);
  229. }
  230. if (BasePriority >= 0x10)
  231. {
  232. SetPriority(0xF);
  233. }
  234. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
  235. {
  236. // TODO: GIC distributor stuffs (sgir changes ect)
  237. Context.RequestInterrupt();
  238. }
  239. SignaledObj = null;
  240. ObjSyncResult = KernelResult.ThreadTerminating;
  241. ReleaseAndResume();
  242. }
  243. }
  244. result = SchedFlags;
  245. KernelContext.CriticalSection.Leave();
  246. return result & ThreadSchedState.LowMask;
  247. }
  248. public void Terminate()
  249. {
  250. ThreadSchedState state = PrepareForTermination();
  251. if (state != ThreadSchedState.TerminationPending)
  252. {
  253. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  254. }
  255. }
  256. public void HandlePostSyscall()
  257. {
  258. ThreadSchedState state;
  259. do
  260. {
  261. if (TerminationRequested)
  262. {
  263. Exit();
  264. // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
  265. break;
  266. }
  267. KernelContext.CriticalSection.Enter();
  268. if (TerminationRequested)
  269. {
  270. state = ThreadSchedState.TerminationPending;
  271. }
  272. else
  273. {
  274. if (_forcePauseFlags != ThreadSchedState.None)
  275. {
  276. CombineForcePauseFlags();
  277. }
  278. state = ThreadSchedState.Running;
  279. }
  280. KernelContext.CriticalSection.Leave();
  281. } while (state == ThreadSchedState.TerminationPending);
  282. }
  283. public void Exit()
  284. {
  285. // TODO: Debug event.
  286. if (Owner != null)
  287. {
  288. Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);
  289. _hasBeenReleased = true;
  290. }
  291. KernelContext.CriticalSection.Enter();
  292. _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
  293. bool decRef = ExitImpl();
  294. Context.StopRunning();
  295. KernelContext.CriticalSection.Leave();
  296. if (decRef)
  297. {
  298. DecrementReferenceCount();
  299. }
  300. }
  301. private bool ExitImpl()
  302. {
  303. KernelContext.CriticalSection.Enter();
  304. SetNewSchedFlags(ThreadSchedState.TerminationPending);
  305. bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;
  306. Signal();
  307. KernelContext.CriticalSection.Leave();
  308. return decRef;
  309. }
  310. public KernelResult Sleep(long timeout)
  311. {
  312. KernelContext.CriticalSection.Enter();
  313. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  314. {
  315. KernelContext.CriticalSection.Leave();
  316. return KernelResult.ThreadTerminating;
  317. }
  318. SetNewSchedFlags(ThreadSchedState.Paused);
  319. if (timeout > 0)
  320. {
  321. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  322. }
  323. KernelContext.CriticalSection.Leave();
  324. if (timeout > 0)
  325. {
  326. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  327. }
  328. return 0;
  329. }
  330. public void SetPriority(int priority)
  331. {
  332. KernelContext.CriticalSection.Enter();
  333. BasePriority = priority;
  334. UpdatePriorityInheritance();
  335. KernelContext.CriticalSection.Leave();
  336. }
  337. public void Suspend(ThreadSchedState type)
  338. {
  339. _forcePauseFlags |= type;
  340. CombineForcePauseFlags();
  341. }
  342. public void Resume(ThreadSchedState type)
  343. {
  344. ThreadSchedState oldForcePauseFlags = _forcePauseFlags;
  345. _forcePauseFlags &= ~type;
  346. if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
  347. {
  348. ThreadSchedState oldSchedFlags = SchedFlags;
  349. SchedFlags &= ThreadSchedState.LowMask;
  350. AdjustScheduling(oldSchedFlags);
  351. }
  352. }
  353. public KernelResult SetActivity(bool pause)
  354. {
  355. KernelResult result = KernelResult.Success;
  356. KernelContext.CriticalSection.Enter();
  357. ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
  358. if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
  359. {
  360. KernelContext.CriticalSection.Leave();
  361. return KernelResult.InvalidState;
  362. }
  363. KernelContext.CriticalSection.Enter();
  364. if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
  365. {
  366. if (pause)
  367. {
  368. // Pause, the force pause flag should be clear (thread is NOT paused).
  369. if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
  370. {
  371. Suspend(ThreadSchedState.ThreadPauseFlag);
  372. }
  373. else
  374. {
  375. result = KernelResult.InvalidState;
  376. }
  377. }
  378. else
  379. {
  380. // Unpause, the force pause flag should be set (thread is paused).
  381. if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
  382. {
  383. Resume(ThreadSchedState.ThreadPauseFlag);
  384. }
  385. else
  386. {
  387. result = KernelResult.InvalidState;
  388. }
  389. }
  390. }
  391. KernelContext.CriticalSection.Leave();
  392. KernelContext.CriticalSection.Leave();
  393. return result;
  394. }
  395. public void CancelSynchronization()
  396. {
  397. KernelContext.CriticalSection.Enter();
  398. if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
  399. {
  400. SyncCancelled = true;
  401. }
  402. else if (Withholder != null)
  403. {
  404. Withholder.Remove(WithholderNode);
  405. SetNewSchedFlags(ThreadSchedState.Running);
  406. Withholder = null;
  407. SyncCancelled = true;
  408. }
  409. else
  410. {
  411. SignaledObj = null;
  412. ObjSyncResult = KernelResult.Cancelled;
  413. SetNewSchedFlags(ThreadSchedState.Running);
  414. SyncCancelled = false;
  415. }
  416. KernelContext.CriticalSection.Leave();
  417. }
  418. public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
  419. {
  420. KernelContext.CriticalSection.Enter();
  421. bool useOverride = _affinityOverrideCount != 0;
  422. // The value -3 is "do not change the preferred core".
  423. if (newCore == -3)
  424. {
  425. newCore = useOverride ? _preferredCoreOverride : PreferredCore;
  426. if ((newAffinityMask & (1 << newCore)) == 0)
  427. {
  428. KernelContext.CriticalSection.Leave();
  429. return KernelResult.InvalidCombination;
  430. }
  431. }
  432. if (useOverride)
  433. {
  434. _preferredCoreOverride = newCore;
  435. _affinityMaskOverride = newAffinityMask;
  436. }
  437. else
  438. {
  439. long oldAffinityMask = AffinityMask;
  440. PreferredCore = newCore;
  441. AffinityMask = newAffinityMask;
  442. if (oldAffinityMask != newAffinityMask)
  443. {
  444. int oldCore = ActiveCore;
  445. if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
  446. {
  447. if (PreferredCore < 0)
  448. {
  449. ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
  450. }
  451. else
  452. {
  453. ActiveCore = PreferredCore;
  454. }
  455. }
  456. AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
  457. }
  458. }
  459. KernelContext.CriticalSection.Leave();
  460. return KernelResult.Success;
  461. }
  462. private void CombineForcePauseFlags()
  463. {
  464. ThreadSchedState oldFlags = SchedFlags;
  465. ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
  466. SchedFlags = lowNibble | _forcePauseFlags;
  467. AdjustScheduling(oldFlags);
  468. }
  469. private void SetNewSchedFlags(ThreadSchedState newFlags)
  470. {
  471. KernelContext.CriticalSection.Enter();
  472. ThreadSchedState oldFlags = SchedFlags;
  473. SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;
  474. if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
  475. {
  476. AdjustScheduling(oldFlags);
  477. }
  478. KernelContext.CriticalSection.Leave();
  479. }
  480. public void ReleaseAndResume()
  481. {
  482. KernelContext.CriticalSection.Enter();
  483. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
  484. {
  485. if (Withholder != null)
  486. {
  487. Withholder.Remove(WithholderNode);
  488. SetNewSchedFlags(ThreadSchedState.Running);
  489. Withholder = null;
  490. }
  491. else
  492. {
  493. SetNewSchedFlags(ThreadSchedState.Running);
  494. }
  495. }
  496. KernelContext.CriticalSection.Leave();
  497. }
  498. public void Reschedule(ThreadSchedState newFlags)
  499. {
  500. KernelContext.CriticalSection.Enter();
  501. ThreadSchedState oldFlags = SchedFlags;
  502. SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
  503. (newFlags & ThreadSchedState.LowMask);
  504. AdjustScheduling(oldFlags);
  505. KernelContext.CriticalSection.Leave();
  506. }
  507. public void AddMutexWaiter(KThread requester)
  508. {
  509. AddToMutexWaitersList(requester);
  510. requester.MutexOwner = this;
  511. UpdatePriorityInheritance();
  512. }
  513. public void RemoveMutexWaiter(KThread thread)
  514. {
  515. if (thread._mutexWaiterNode?.List != null)
  516. {
  517. _mutexWaiters.Remove(thread._mutexWaiterNode);
  518. }
  519. thread.MutexOwner = null;
  520. UpdatePriorityInheritance();
  521. }
  522. public KThread RelinquishMutex(ulong mutexAddress, out int count)
  523. {
  524. count = 0;
  525. if (_mutexWaiters.First == null)
  526. {
  527. return null;
  528. }
  529. KThread newMutexOwner = null;
  530. LinkedListNode<KThread> currentNode = _mutexWaiters.First;
  531. do
  532. {
  533. // Skip all threads that are not waiting for this mutex.
  534. while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
  535. {
  536. currentNode = currentNode.Next;
  537. }
  538. if (currentNode == null)
  539. {
  540. break;
  541. }
  542. LinkedListNode<KThread> nextNode = currentNode.Next;
  543. _mutexWaiters.Remove(currentNode);
  544. currentNode.Value.MutexOwner = newMutexOwner;
  545. if (newMutexOwner != null)
  546. {
  547. // New owner was already selected, re-insert on new owner list.
  548. newMutexOwner.AddToMutexWaitersList(currentNode.Value);
  549. }
  550. else
  551. {
  552. // New owner not selected yet, use current thread.
  553. newMutexOwner = currentNode.Value;
  554. }
  555. count++;
  556. currentNode = nextNode;
  557. }
  558. while (currentNode != null);
  559. if (newMutexOwner != null)
  560. {
  561. UpdatePriorityInheritance();
  562. newMutexOwner.UpdatePriorityInheritance();
  563. }
  564. return newMutexOwner;
  565. }
  566. private void UpdatePriorityInheritance()
  567. {
  568. // If any of the threads waiting for the mutex has
  569. // higher priority than the current thread, then
  570. // the current thread inherits that priority.
  571. int highestPriority = BasePriority;
  572. if (_mutexWaiters.First != null)
  573. {
  574. int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;
  575. if (waitingDynamicPriority < highestPriority)
  576. {
  577. highestPriority = waitingDynamicPriority;
  578. }
  579. }
  580. if (highestPriority != DynamicPriority)
  581. {
  582. int oldPriority = DynamicPriority;
  583. DynamicPriority = highestPriority;
  584. AdjustSchedulingForNewPriority(oldPriority);
  585. if (MutexOwner != null)
  586. {
  587. // Remove and re-insert to ensure proper sorting based on new priority.
  588. MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);
  589. MutexOwner.AddToMutexWaitersList(this);
  590. MutexOwner.UpdatePriorityInheritance();
  591. }
  592. }
  593. }
  594. private void AddToMutexWaitersList(KThread thread)
  595. {
  596. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  597. int currentPriority = thread.DynamicPriority;
  598. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  599. {
  600. nextPrio = nextPrio.Next;
  601. }
  602. if (nextPrio != null)
  603. {
  604. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  605. }
  606. else
  607. {
  608. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  609. }
  610. }
  611. private void AdjustScheduling(ThreadSchedState oldFlags)
  612. {
  613. if (oldFlags == SchedFlags)
  614. {
  615. return;
  616. }
  617. if (!IsSchedulable)
  618. {
  619. if (!_forcedUnschedulable)
  620. {
  621. // Ensure our thread is running and we have an event.
  622. StartHostThread();
  623. // If the thread is not schedulable, we want to just run or pause
  624. // it directly as we don't care about priority or the core it is
  625. // running on in this case.
  626. if (SchedFlags == ThreadSchedState.Running)
  627. {
  628. _schedulerWaitEvent.Set();
  629. }
  630. else
  631. {
  632. _schedulerWaitEvent.Reset();
  633. }
  634. }
  635. return;
  636. }
  637. if (oldFlags == ThreadSchedState.Running)
  638. {
  639. // Was running, now it's stopped.
  640. if (ActiveCore >= 0)
  641. {
  642. KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
  643. }
  644. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  645. {
  646. if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
  647. {
  648. KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
  649. }
  650. }
  651. }
  652. else if (SchedFlags == ThreadSchedState.Running)
  653. {
  654. // Was stopped, now it's running.
  655. if (ActiveCore >= 0)
  656. {
  657. KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
  658. }
  659. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  660. {
  661. if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
  662. {
  663. KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
  664. }
  665. }
  666. }
  667. KernelContext.ThreadReselectionRequested = true;
  668. }
  669. private void AdjustSchedulingForNewPriority(int oldPriority)
  670. {
  671. if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
  672. {
  673. return;
  674. }
  675. // Remove thread from the old priority queues.
  676. if (ActiveCore >= 0)
  677. {
  678. KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
  679. }
  680. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  681. {
  682. if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
  683. {
  684. KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
  685. }
  686. }
  687. // Add thread to the new priority queues.
  688. KThread currentThread = KernelStatic.GetCurrentThread();
  689. if (ActiveCore >= 0)
  690. {
  691. if (currentThread == this)
  692. {
  693. KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
  694. }
  695. else
  696. {
  697. KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
  698. }
  699. }
  700. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  701. {
  702. if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
  703. {
  704. KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
  705. }
  706. }
  707. KernelContext.ThreadReselectionRequested = true;
  708. }
  709. private void AdjustSchedulingForNewAffinity(long oldAffinityMask, int oldCore)
  710. {
  711. if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
  712. {
  713. return;
  714. }
  715. // Remove thread from the old priority queues.
  716. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  717. {
  718. if (((oldAffinityMask >> core) & 1) != 0)
  719. {
  720. if (core == oldCore)
  721. {
  722. KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
  723. }
  724. else
  725. {
  726. KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
  727. }
  728. }
  729. }
  730. // Add thread to the new priority queues.
  731. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  732. {
  733. if (((AffinityMask >> core) & 1) != 0)
  734. {
  735. if (core == ActiveCore)
  736. {
  737. KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
  738. }
  739. else
  740. {
  741. KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
  742. }
  743. }
  744. }
  745. KernelContext.ThreadReselectionRequested = true;
  746. }
  747. public void SetEntryArguments(long argsPtr, int threadHandle)
  748. {
  749. Context.SetX(0, (ulong)argsPtr);
  750. Context.SetX(1, (ulong)threadHandle);
  751. }
  752. public void TimeUp()
  753. {
  754. ReleaseAndResume();
  755. }
  756. public string GetGuestStackTrace()
  757. {
  758. return Owner.Debugger.GetGuestStackTrace(this);
  759. }
  760. public string GetGuestRegisterPrintout()
  761. {
  762. return Owner.Debugger.GetCpuRegisterPrintout(this);
  763. }
  764. public void PrintGuestStackTrace()
  765. {
  766. Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
  767. }
  768. public void PrintGuestRegisterPrintout()
  769. {
  770. Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n");
  771. }
  772. public void AddCpuTime(long ticks)
  773. {
  774. Interlocked.Add(ref _totalTimeRunning, ticks);
  775. }
  776. public void StartHostThread()
  777. {
  778. if (_schedulerWaitEvent == null)
  779. {
  780. var schedulerWaitEvent = new ManualResetEvent(false);
  781. if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
  782. {
  783. HostThread.Start();
  784. }
  785. else
  786. {
  787. schedulerWaitEvent.Dispose();
  788. }
  789. }
  790. }
  791. private void ThreadStart()
  792. {
  793. _schedulerWaitEvent.WaitOne();
  794. KernelStatic.SetKernelContext(KernelContext, this);
  795. if (_customThreadStart != null)
  796. {
  797. _customThreadStart();
  798. }
  799. else
  800. {
  801. Owner.Context.Execute(Context, _entrypoint);
  802. }
  803. Context.Dispose();
  804. _schedulerWaitEvent.Dispose();
  805. }
  806. public void MakeUnschedulable()
  807. {
  808. _forcedUnschedulable = true;
  809. }
  810. public override bool IsSignaled()
  811. {
  812. return _hasExited != 0;
  813. }
  814. protected override void Destroy()
  815. {
  816. if (_hasBeenInitialized)
  817. {
  818. FreeResources();
  819. bool released = Owner != null || _hasBeenReleased;
  820. if (Owner != null)
  821. {
  822. Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);
  823. Owner.DecrementReferenceCount();
  824. }
  825. else
  826. {
  827. KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
  828. }
  829. }
  830. }
  831. private void FreeResources()
  832. {
  833. Owner?.RemoveThread(this);
  834. if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
  835. {
  836. throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
  837. }
  838. KernelContext.CriticalSection.Enter();
  839. // Wake up all threads that may be waiting for a mutex being held by this thread.
  840. foreach (KThread thread in _mutexWaiters)
  841. {
  842. thread.MutexOwner = null;
  843. thread._preferredCoreOverride = 0;
  844. thread.ObjSyncResult = KernelResult.InvalidState;
  845. thread.ReleaseAndResume();
  846. }
  847. KernelContext.CriticalSection.Leave();
  848. Owner?.DecrementThreadCountAndTerminateIfZero();
  849. }
  850. }
  851. }