KThread.cs 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Linq;
  8. using System.Text;
  9. using System.Threading;
  10. namespace Ryujinx.HLE.HOS.Kernel.Threading
  11. {
  12. class KThread : KSynchronizationObject, IKFutureSchedulerObject
  13. {
// Maximum number of synchronization objects a single WaitSynchronization call may target.
public const int MaxWaitSyncObjects = 64;

// 0/1 flag toggled with Interlocked in Execute() so the host thread is started at most once.
private int _hostThreadRunning;

// Host OS thread that actually runs this guest thread.
public Thread HostThread { get; private set; }

// Emulated CPU register state for this thread.
public ARMeilleure.State.ExecutionContext Context { get; private set; }

// Bit N set => thread may run on core N.
public long AffinityMask { get; set; }

// Kernel-wide unique thread id, assigned in Initialize().
public long ThreadUid { get; private set; }

public long TotalTimeRunning { get; set; }

// Object that satisfied the last wait, if any.
public KSynchronizationObject SignaledObj { get; set; }

// Guest addresses used by condition-variable / mutex arbitration.
public ulong CondVarAddress { get; set; }

private ulong _entrypoint;

// Optional host-side routine run instead of guest code (see ThreadStart()).
private ThreadStart _customThreadStart;

public ulong MutexAddress { get; set; }

// Owner process; null for kernel-internal threads.
public KProcess Owner { get; private set; }

private ulong _tlsAddress;

// Guest virtual address of this thread's TLS block (0 for non-user threads).
public ulong TlsAddress => _tlsAddress;
public ulong TlsDramAddress { get; private set; }

// Parallel arrays: object being waited on and the handle it was referenced by.
public KSynchronizationObject[] WaitSyncObjects { get; }
public int[] WaitSyncHandles { get; }

public long LastScheduledTime { get; set; }

// Per-core node used by the scheduling data linked lists.
public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

// List (and node) holding this thread while it is withheld from scheduling.
public LinkedList<KThread> Withholder { get; set; }
public LinkedListNode<KThread> WithholderNode { get; set; }

public LinkedListNode<KThread> ProcessListNode { get; set; }

// Threads waiting on a mutex owned by this thread, kept sorted by DynamicPriority.
private LinkedList<KThread> _mutexWaiters;
private LinkedListNode<KThread> _mutexWaiterNode;

// Thread currently holding the mutex this thread is waiting for, if any.
public KThread MutexOwner { get; private set; }

public int ThreadHandleForUserMutex { get; set; }

// Pause bits (thread/process/kernel-init) merged into SchedFlags by CombineForcePauseFlags().
private ThreadSchedState _forcePauseFlags;

// Result delivered to the thread when a synchronization wait completes.
public KernelResult ObjSyncResult { get; set; }

// Effective priority (BasePriority possibly boosted by priority inheritance).
public int DynamicPriority { get; set; }
public int CurrentCore { get; set; }
public int BasePriority { get; set; }
public int PreferredCore { get; set; }

// Values saved/overridden while an affinity override is active (_affinityOverrideCount != 0).
private long _affinityMaskOverride;
private int _preferredCoreOverride;
#pragma warning disable CS0649
private int _affinityOverrideCount;
#pragma warning restore CS0649

// Combined scheduling state: low nibble is the run state, high bits are pause flags.
public ThreadSchedState SchedFlags { get; private set; }

// int-backed so it can be used with Interlocked.CompareExchange (see PrepareForTermination).
private int _shallBeTerminated;
public bool ShallBeTerminated
{
    get => _shallBeTerminated != 0;
    set => _shallBeTerminated = value ? 1 : 0;
}

public bool SyncCancelled { get; set; }
public bool WaitingSync { get; set; }

private bool _hasExited;
private bool _hasBeenInitialized;
private bool _hasBeenReleased;

public bool WaitingInArbitration { get; set; }

private KScheduler _scheduler;
private KSchedulingData _schedulingData;

public long LastPc { get; set; }
/// <summary>
/// Creates the thread object and allocates its fixed-size wait lists.
/// Actual thread state is set up later by Initialize().
/// </summary>
public KThread(KernelContext context) : base(context)
{
    // Cache scheduler references used on every scheduling adjustment.
    _scheduler = KernelContext.Scheduler;
    _schedulingData = KernelContext.Scheduler.SchedulingData;

    WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
    WaitSyncHandles = new int[MaxWaitSyncObjects];

    SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];

    _mutexWaiters = new LinkedList<KThread>();
}
  77. public KernelResult Initialize(
  78. ulong entrypoint,
  79. ulong argsPtr,
  80. ulong stackTop,
  81. int priority,
  82. int defaultCpuCore,
  83. KProcess owner,
  84. ThreadType type,
  85. ThreadStart customThreadStart = null)
  86. {
  87. if ((uint)type > 3)
  88. {
  89. throw new ArgumentException($"Invalid thread type \"{type}\".");
  90. }
  91. PreferredCore = defaultCpuCore;
  92. AffinityMask |= 1L << defaultCpuCore;
  93. SchedFlags = type == ThreadType.Dummy
  94. ? ThreadSchedState.Running
  95. : ThreadSchedState.None;
  96. CurrentCore = PreferredCore;
  97. DynamicPriority = priority;
  98. BasePriority = priority;
  99. ObjSyncResult = KernelResult.ThreadNotStarted;
  100. _entrypoint = entrypoint;
  101. _customThreadStart = customThreadStart;
  102. if (type == ThreadType.User)
  103. {
  104. if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
  105. {
  106. return KernelResult.OutOfMemory;
  107. }
  108. TlsDramAddress = owner.MemoryManager.GetDramAddressFromVa(_tlsAddress);
  109. MemoryHelper.FillWithZeros(owner.CpuMemory, (long)_tlsAddress, KTlsPageInfo.TlsEntrySize);
  110. }
  111. bool is64Bits;
  112. if (owner != null)
  113. {
  114. Owner = owner;
  115. owner.IncrementReferenceCount();
  116. owner.IncrementThreadCount();
  117. is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
  118. }
  119. else
  120. {
  121. is64Bits = true;
  122. }
  123. HostThread = new Thread(ThreadStart);
  124. Context = CpuContext.CreateExecutionContext();
  125. bool isAarch32 = !Owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
  126. Context.IsAarch32 = isAarch32;
  127. Context.SetX(0, argsPtr);
  128. if (isAarch32)
  129. {
  130. Context.SetX(13, (uint)stackTop);
  131. }
  132. else
  133. {
  134. Context.SetX(31, stackTop);
  135. }
  136. Context.CntfrqEl0 = 19200000;
  137. Context.Tpidr = (long)_tlsAddress;
  138. owner.SubscribeThreadEventHandlers(Context);
  139. ThreadUid = KernelContext.NewThreadUid();
  140. HostThread.Name = $"HLE.HostThread.{ThreadUid}";
  141. _hasBeenInitialized = true;
  142. if (owner != null)
  143. {
  144. owner.AddThread(this);
  145. if (owner.IsPaused)
  146. {
  147. KernelContext.CriticalSection.Enter();
  148. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  149. {
  150. KernelContext.CriticalSection.Leave();
  151. return KernelResult.Success;
  152. }
  153. _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
  154. CombineForcePauseFlags();
  155. KernelContext.CriticalSection.Leave();
  156. }
  157. }
  158. return KernelResult.Success;
  159. }
/// <summary>
/// Transitions the thread from its initial state to Running, retrying while
/// the calling thread itself has pending force-pause flags.
/// </summary>
/// <returns>
/// KernelResult.Success if the thread was started,
/// KernelResult.InvalidState if it was already started,
/// KernelResult.ThreadTerminating if a termination raced the start.
/// </returns>
public KernelResult Start()
{
    // Until the kernel finishes initializing, new threads start paused.
    if (!KernelContext.KernelInitialized)
    {
        KernelContext.CriticalSection.Enter();

        if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
        {
            _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;

            CombineForcePauseFlags();
        }

        KernelContext.CriticalSection.Leave();
    }

    KernelResult result = KernelResult.ThreadTerminating;

    KernelContext.CriticalSection.Enter();

    if (!ShallBeTerminated)
    {
        KThread currentThread = KernelContext.Scheduler.GetCurrentThread();

        while (SchedFlags != ThreadSchedState.TerminationPending &&
               currentThread.SchedFlags != ThreadSchedState.TerminationPending &&
               !currentThread.ShallBeTerminated)
        {
            // A non-None low nibble means the thread was already started.
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
            {
                result = KernelResult.InvalidState;

                break;
            }

            if (currentThread._forcePauseFlags == ThreadSchedState.None)
            {
                if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                {
                    CombineForcePauseFlags();
                }

                SetNewSchedFlags(ThreadSchedState.Running);

                result = KernelResult.Success;

                break;
            }
            else
            {
                // The caller itself must pause first; apply its pause flags,
                // then drop and re-take the lock to let the pause happen.
                currentThread.CombineForcePauseFlags();

                KernelContext.CriticalSection.Leave();
                KernelContext.CriticalSection.Enter();

                if (currentThread.ShallBeTerminated)
                {
                    break;
                }
            }
        }
    }

    KernelContext.CriticalSection.Leave();

    return result;
}
/// <summary>
/// Performs thread exit: releases the thread resource-limit slot, clears
/// pause flags, signals waiters and drops the self reference.
/// </summary>
public void Exit()
{
    // TODO: Debug event.
    if (Owner != null)
    {
        Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);

        // Remember the release so Destroy() does not release the slot twice.
        _hasBeenReleased = true;
    }

    KernelContext.CriticalSection.Enter();

    // A dying thread can no longer be force-paused.
    _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;

    ExitImpl();

    KernelContext.CriticalSection.Leave();

    DecrementReferenceCount();
}
/// <summary>
/// Marks the thread for termination (first caller wins via CompareExchange)
/// and wakes/interrupts it so it can observe the request.
/// </summary>
/// <returns>The low nibble (run state) of the scheduling flags after the request.</returns>
public ThreadSchedState PrepareForTermination()
{
    KernelContext.CriticalSection.Enter();

    ThreadSchedState result;

    // Only the first termination request performs the state changes below.
    if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
    {
        if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
        {
            // Never started: go straight to termination pending.
            SchedFlags = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                // Clear the pause so the thread can run and terminate itself.
                _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                ThreadSchedState oldSchedFlags = SchedFlags;

                SchedFlags &= ThreadSchedState.LowMask;

                AdjustScheduling(oldSchedFlags);
            }

            // Boost low-priority threads so termination is not starved.
            if (BasePriority >= 0x10)
            {
                SetPriority(0xF);
            }

            if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
            {
                // TODO: GIC distributor stuffs (sgir changes ect)
                Context.RequestInterrupt();
            }

            // Abort any in-progress synchronization with a terminating result.
            SignaledObj = null;
            ObjSyncResult = KernelResult.ThreadTerminating;

            ReleaseAndResume();
        }
    }

    result = SchedFlags;

    KernelContext.CriticalSection.Leave();

    return result & ThreadSchedState.LowMask;
}
  262. public void Terminate()
  263. {
  264. ThreadSchedState state = PrepareForTermination();
  265. if (state != ThreadSchedState.TerminationPending)
  266. {
  267. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  268. }
  269. }
/// <summary>
/// Runs after a syscall returns: exits the thread if termination was
/// requested, otherwise re-applies any pending force-pause flags.
/// </summary>
public void HandlePostSyscall()
{
    ThreadSchedState state;

    do
    {
        if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
        {
            KernelContext.Scheduler.ExitThread(this);
            Exit();

            // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
            break;
        }

        KernelContext.CriticalSection.Enter();

        if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
        {
            state = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                CombineForcePauseFlags();
            }

            state = ThreadSchedState.Running;
        }

        KernelContext.CriticalSection.Leave();
        // Loop again if termination was requested between the two checks.
    } while (state == ThreadSchedState.TerminationPending);
}
/// <summary>
/// Final state transition of a dying thread: mark termination pending,
/// record the exit and signal anyone waiting on this thread object.
/// </summary>
private void ExitImpl()
{
    KernelContext.CriticalSection.Enter();

    SetNewSchedFlags(ThreadSchedState.TerminationPending);

    // Must be set before Signal() so IsSignaled() observes the exit.
    _hasExited = true;

    Signal();

    KernelContext.CriticalSection.Leave();
}
  306. public KernelResult Sleep(long timeout)
  307. {
  308. KernelContext.CriticalSection.Enter();
  309. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  310. {
  311. KernelContext.CriticalSection.Leave();
  312. return KernelResult.ThreadTerminating;
  313. }
  314. SetNewSchedFlags(ThreadSchedState.Paused);
  315. if (timeout > 0)
  316. {
  317. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  318. }
  319. KernelContext.CriticalSection.Leave();
  320. if (timeout > 0)
  321. {
  322. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  323. }
  324. return 0;
  325. }
/// <summary>
/// Yields the CPU to another thread of the same priority on the same core.
/// </summary>
public void Yield()
{
    KernelContext.CriticalSection.Enter();

    if (SchedFlags != ThreadSchedState.Running)
    {
        // Not running anymore; just let the scheduler pick something else.
        KernelContext.CriticalSection.Leave();

        KernelContext.Scheduler.ContextSwitch();

        return;
    }

    if (DynamicPriority < KScheduler.PrioritiesCount)
    {
        // Move current thread to the end of the queue.
        _schedulingData.Reschedule(DynamicPriority, CurrentCore, this);
    }

    _scheduler.ThreadReselectionRequested = true;

    KernelContext.CriticalSection.Leave();

    KernelContext.Scheduler.ContextSwitch();
}
/// <summary>
/// Yields the CPU, additionally trying to pull a suitable suggested thread
/// from another core onto this core (load balancing).
/// </summary>
public void YieldWithLoadBalancing()
{
    KernelContext.CriticalSection.Enter();

    if (SchedFlags != ThreadSchedState.Running)
    {
        KernelContext.CriticalSection.Leave();

        KernelContext.Scheduler.ContextSwitch();

        return;
    }

    int prio = DynamicPriority;
    int core = CurrentCore;

    KThread nextThreadOnCurrentQueue = null;

    if (DynamicPriority < KScheduler.PrioritiesCount)
    {
        // Move current thread to the end of the queue.
        _schedulingData.Reschedule(prio, core, this);

        Func<KThread, bool> predicate = x => x.DynamicPriority == prio;

        nextThreadOnCurrentQueue = _schedulingData.ScheduledThreads(core).FirstOrDefault(predicate);
    }

    IEnumerable<KThread> SuitableCandidates()
    {
        foreach (KThread thread in _schedulingData.SuggestedThreads(core))
        {
            int srcCore = thread.CurrentCore;

            if (srcCore >= 0)
            {
                KThread selectedSrcCore = _scheduler.CoreContexts[srcCore].SelectedThread;

                // Skip candidates currently selected on their core, or whose
                // core's selected thread has priority 0 or 1.
                if (selectedSrcCore == thread || ((selectedSrcCore?.DynamicPriority ?? 2) < 2))
                {
                    continue;
                }
            }

            // If the candidate was scheduled after the current thread, then it's not worth it,
            // unless the priority is higher than the current one.
            // NOTE(review): nextThreadOnCurrentQueue can be null when
            // DynamicPriority >= PrioritiesCount, which would throw here —
            // confirm callers guarantee a schedulable priority.
            if (nextThreadOnCurrentQueue.LastScheduledTime >= thread.LastScheduledTime ||
                nextThreadOnCurrentQueue.DynamicPriority < thread.DynamicPriority)
            {
                yield return thread;
            }
        }
    }

    // Pull the first candidate that would not be deprioritized by moving here.
    KThread dst = SuitableCandidates().FirstOrDefault(x => x.DynamicPriority <= prio);

    if (dst != null)
    {
        _schedulingData.TransferToCore(dst.DynamicPriority, core, dst);

        _scheduler.ThreadReselectionRequested = true;
    }

    if (this != nextThreadOnCurrentQueue)
    {
        _scheduler.ThreadReselectionRequested = true;
    }

    KernelContext.CriticalSection.Leave();

    KernelContext.Scheduler.ContextSwitch();
}
/// <summary>
/// Yields by moving this thread off any core (core -1) and, if the core's
/// run queue is now empty, migrates one suggested thread onto it.
/// </summary>
public void YieldAndWaitForLoadBalancing()
{
    KernelContext.CriticalSection.Enter();

    if (SchedFlags != ThreadSchedState.Running)
    {
        KernelContext.CriticalSection.Leave();

        KernelContext.Scheduler.ContextSwitch();

        return;
    }

    int core = CurrentCore;

    // Detach this thread from its core; it becomes a migration candidate.
    _schedulingData.TransferToCore(DynamicPriority, -1, this);

    KThread selectedThread = null;

    if (!_schedulingData.ScheduledThreads(core).Any())
    {
        foreach (KThread thread in _schedulingData.SuggestedThreads(core))
        {
            if (thread.CurrentCore < 0)
            {
                continue;
            }

            KThread firstCandidate = _schedulingData.ScheduledThreads(thread.CurrentCore).FirstOrDefault();

            // Do not steal the thread its own core is about to run.
            if (firstCandidate == thread)
            {
                continue;
            }

            if (firstCandidate == null || firstCandidate.DynamicPriority >= 2)
            {
                _schedulingData.TransferToCore(thread.DynamicPriority, core, thread);

                selectedThread = thread;
            }

            // Only the first viable suggestion is considered.
            break;
        }
    }

    if (selectedThread != this)
    {
        _scheduler.ThreadReselectionRequested = true;
    }

    KernelContext.CriticalSection.Leave();

    KernelContext.Scheduler.ContextSwitch();
}
/// <summary>
/// Sets the base priority; the effective (dynamic) priority is then
/// recomputed, taking mutex priority inheritance into account.
/// </summary>
/// <param name="priority">New base priority (lower value = higher priority).</param>
public void SetPriority(int priority)
{
    KernelContext.CriticalSection.Enter();

    BasePriority = priority;

    UpdatePriorityInheritance();

    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Pauses or unpauses the thread by toggling the thread-pause force flag.
/// </summary>
/// <param name="pause">True to pause, false to resume.</param>
/// <returns>
/// KernelResult.Success, or KernelResult.InvalidState if the thread is not
/// in a pausable/resumable state or the flag is already in the requested state.
/// </returns>
public KernelResult SetActivity(bool pause)
{
    KernelResult result = KernelResult.Success;

    KernelContext.CriticalSection.Enter();

    ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

    if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
    {
        KernelContext.CriticalSection.Leave();

        return KernelResult.InvalidState;
    }

    // Second Enter mirrors the official kernel's nested lock here.
    // NOTE(review): assumes the critical section supports recursive entry — confirm.
    KernelContext.CriticalSection.Enter();

    if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
    {
        if (pause)
        {
            // Pause, the force pause flag should be clear (thread is NOT paused).
            if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
            {
                _forcePauseFlags |= ThreadSchedState.ThreadPauseFlag;

                CombineForcePauseFlags();
            }
            else
            {
                result = KernelResult.InvalidState;
            }
        }
        else
        {
            // Unpause, the force pause flag should be set (thread is paused).
            if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
            {
                ThreadSchedState oldForcePauseFlags = _forcePauseFlags;

                _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                // Only resume scheduling if no other pause flag remains.
                if ((oldForcePauseFlags & ~ThreadSchedState.ThreadPauseFlag) == ThreadSchedState.None)
                {
                    ThreadSchedState oldSchedFlags = SchedFlags;

                    SchedFlags &= ThreadSchedState.LowMask;

                    AdjustScheduling(oldSchedFlags);
                }
            }
            else
            {
                result = KernelResult.InvalidState;
            }
        }
    }

    KernelContext.CriticalSection.Leave();
    KernelContext.CriticalSection.Leave();

    return result;
}
/// <summary>
/// Cancels an in-progress synchronization wait, resuming the thread with
/// KernelResult.Cancelled, or arms SyncCancelled for the next wait.
/// </summary>
public void CancelSynchronization()
{
    KernelContext.CriticalSection.Enter();

    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
    {
        // Not currently blocked in a sync wait: flag the cancellation so the
        // next wait call observes it.
        SyncCancelled = true;
    }
    else if (Withholder != null)
    {
        // Thread is parked on a withholder list: detach and resume it.
        Withholder.Remove(WithholderNode);

        SetNewSchedFlags(ThreadSchedState.Running);

        Withholder = null;

        SyncCancelled = true;
    }
    else
    {
        // Blocked in a plain wait: deliver the cancelled result and resume.
        SignaledObj = null;
        ObjSyncResult = KernelResult.Cancelled;

        SetNewSchedFlags(ThreadSchedState.Running);

        SyncCancelled = false;
    }

    KernelContext.CriticalSection.Leave();
}
  518. public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
  519. {
  520. KernelContext.CriticalSection.Enter();
  521. bool useOverride = _affinityOverrideCount != 0;
  522. // The value -3 is "do not change the preferred core".
  523. if (newCore == -3)
  524. {
  525. newCore = useOverride ? _preferredCoreOverride : PreferredCore;
  526. if ((newAffinityMask & (1 << newCore)) == 0)
  527. {
  528. KernelContext.CriticalSection.Leave();
  529. return KernelResult.InvalidCombination;
  530. }
  531. }
  532. if (useOverride)
  533. {
  534. _preferredCoreOverride = newCore;
  535. _affinityMaskOverride = newAffinityMask;
  536. }
  537. else
  538. {
  539. long oldAffinityMask = AffinityMask;
  540. PreferredCore = newCore;
  541. AffinityMask = newAffinityMask;
  542. if (oldAffinityMask != newAffinityMask)
  543. {
  544. int oldCore = CurrentCore;
  545. if (CurrentCore >= 0 && ((AffinityMask >> CurrentCore) & 1) == 0)
  546. {
  547. if (PreferredCore < 0)
  548. {
  549. CurrentCore = HighestSetCore(AffinityMask);
  550. }
  551. else
  552. {
  553. CurrentCore = PreferredCore;
  554. }
  555. }
  556. AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
  557. }
  558. }
  559. KernelContext.CriticalSection.Leave();
  560. return KernelResult.Success;
  561. }
  562. private static int HighestSetCore(long mask)
  563. {
  564. for (int core = KScheduler.CpuCoresCount - 1; core >= 0; core--)
  565. {
  566. if (((mask >> core) & 1) != 0)
  567. {
  568. return core;
  569. }
  570. }
  571. return -1;
  572. }
/// <summary>
/// Merges the pending force-pause flags into the scheduling flags (keeping
/// the low-nibble run state) and updates the scheduler queues accordingly.
/// </summary>
private void CombineForcePauseFlags()
{
    ThreadSchedState oldFlags = SchedFlags;
    ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

    SchedFlags = lowNibble | _forcePauseFlags;

    AdjustScheduling(oldFlags);
}
/// <summary>
/// Replaces the low-nibble run state while preserving the high (pause)
/// bits, adjusting scheduler queues only if the run state actually changed.
/// </summary>
private void SetNewSchedFlags(ThreadSchedState newFlags)
{
    KernelContext.CriticalSection.Enter();

    ThreadSchedState oldFlags = SchedFlags;

    SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;

    if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
    {
        AdjustScheduling(oldFlags);
    }

    KernelContext.CriticalSection.Leave();
}
  591. public void ReleaseAndResume()
  592. {
  593. KernelContext.CriticalSection.Enter();
  594. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
  595. {
  596. if (Withholder != null)
  597. {
  598. Withholder.Remove(WithholderNode);
  599. SetNewSchedFlags(ThreadSchedState.Running);
  600. Withholder = null;
  601. }
  602. else
  603. {
  604. SetNewSchedFlags(ThreadSchedState.Running);
  605. }
  606. }
  607. KernelContext.CriticalSection.Leave();
  608. }
/// <summary>
/// Forces a new run state (low nibble of <paramref name="newFlags"/>) while
/// keeping the pause bits, and updates the scheduler queues unconditionally.
/// </summary>
public void Reschedule(ThreadSchedState newFlags)
{
    KernelContext.CriticalSection.Enter();

    ThreadSchedState oldFlags = SchedFlags;

    SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
                 (newFlags & ThreadSchedState.LowMask);

    AdjustScheduling(oldFlags);

    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Registers <paramref name="requester"/> as waiting on a mutex held by
/// this thread and propagates priority inheritance.
/// </summary>
public void AddMutexWaiter(KThread requester)
{
    AddToMutexWaitersList(requester);

    requester.MutexOwner = this;

    UpdatePriorityInheritance();
}
  624. public void RemoveMutexWaiter(KThread thread)
  625. {
  626. if (thread._mutexWaiterNode?.List != null)
  627. {
  628. _mutexWaiters.Remove(thread._mutexWaiterNode);
  629. }
  630. thread.MutexOwner = null;
  631. UpdatePriorityInheritance();
  632. }
/// <summary>
/// Releases a mutex held by this thread: picks the first waiter on
/// <paramref name="mutexAddress"/> as the new owner and moves the remaining
/// waiters on that address onto the new owner's waiter list.
/// </summary>
/// <param name="mutexAddress">Guest address of the mutex being released.</param>
/// <param name="count">Number of threads that were waiting on this mutex.</param>
/// <returns>The new mutex owner, or null if no thread was waiting.</returns>
public KThread RelinquishMutex(ulong mutexAddress, out int count)
{
    count = 0;

    if (_mutexWaiters.First == null)
    {
        return null;
    }

    KThread newMutexOwner = null;

    LinkedListNode<KThread> currentNode = _mutexWaiters.First;

    do
    {
        // Skip all threads that are not waiting for this mutex.
        while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
        {
            currentNode = currentNode.Next;
        }

        if (currentNode == null)
        {
            break;
        }

        LinkedListNode<KThread> nextNode = currentNode.Next;

        _mutexWaiters.Remove(currentNode);

        currentNode.Value.MutexOwner = newMutexOwner;

        if (newMutexOwner != null)
        {
            // New owner was already selected, re-insert on new owner list.
            newMutexOwner.AddToMutexWaitersList(currentNode.Value);
        }
        else
        {
            // New owner not selected yet, use current thread.
            newMutexOwner = currentNode.Value;
        }

        count++;

        currentNode = nextNode;
    }
    while (currentNode != null);

    if (newMutexOwner != null)
    {
        // Both this thread and the new owner may change effective priority.
        UpdatePriorityInheritance();

        newMutexOwner.UpdatePriorityInheritance();
    }

    return newMutexOwner;
}
/// <summary>
/// Recomputes this thread's dynamic priority from its base priority and its
/// highest-priority mutex waiter, cascading the change up the owner chain.
/// </summary>
private void UpdatePriorityInheritance()
{
    // If any of the threads waiting for the mutex has
    // higher priority than the current thread, then
    // the current thread inherits that priority.
    int highestPriority = BasePriority;

    // The waiter list is sorted, so the first entry has the best priority.
    if (_mutexWaiters.First != null)
    {
        int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;

        if (waitingDynamicPriority < highestPriority)
        {
            highestPriority = waitingDynamicPriority;
        }
    }

    if (highestPriority != DynamicPriority)
    {
        int oldPriority = DynamicPriority;

        DynamicPriority = highestPriority;

        AdjustSchedulingForNewPriority(oldPriority);

        if (MutexOwner != null)
        {
            // Remove and re-insert to ensure proper sorting based on new priority.
            MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);

            MutexOwner.AddToMutexWaitersList(this);

            // Recurse up the chain so the owner inherits the new priority too.
            MutexOwner.UpdatePriorityInheritance();
        }
    }
}
  705. private void AddToMutexWaitersList(KThread thread)
  706. {
  707. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  708. int currentPriority = thread.DynamicPriority;
  709. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  710. {
  711. nextPrio = nextPrio.Next;
  712. }
  713. if (nextPrio != null)
  714. {
  715. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  716. }
  717. else
  718. {
  719. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  720. }
  721. }
/// <summary>
/// Updates the scheduler queues after a SchedFlags change: removes the
/// thread when it stops running, inserts it when it starts running.
/// </summary>
/// <param name="oldFlags">Scheduling flags before the change.</param>
private void AdjustScheduling(ThreadSchedState oldFlags)
{
    if (oldFlags == SchedFlags)
    {
        return;
    }

    if (oldFlags == ThreadSchedState.Running)
    {
        // Was running, now it's stopped.
        if (CurrentCore >= 0)
        {
            _schedulingData.Unschedule(DynamicPriority, CurrentCore, this);
        }

        // Also remove it as a migration suggestion from every other allowed core.
        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
            {
                _schedulingData.Unsuggest(DynamicPriority, core, this);
            }
        }
    }
    else if (SchedFlags == ThreadSchedState.Running)
    {
        // Was stopped, now it's running.
        if (CurrentCore >= 0)
        {
            _schedulingData.Schedule(DynamicPriority, CurrentCore, this);
        }

        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
            {
                _schedulingData.Suggest(DynamicPriority, core, this);
            }
        }
    }

    _scheduler.ThreadReselectionRequested = true;
}
/// <summary>
/// Moves a running thread between priority queues after DynamicPriority
/// changed; no-op when the thread is not running.
/// </summary>
/// <param name="oldPriority">The thread's previous dynamic priority.</param>
private void AdjustSchedulingForNewPriority(int oldPriority)
{
    if (SchedFlags != ThreadSchedState.Running)
    {
        return;
    }

    // Remove thread from the old priority queues.
    if (CurrentCore >= 0)
    {
        _schedulingData.Unschedule(oldPriority, CurrentCore, this);
    }

    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
        {
            _schedulingData.Unsuggest(oldPriority, core, this);
        }
    }

    // Add thread to the new priority queues.
    KThread currentThread = _scheduler.GetCurrentThread();

    if (CurrentCore >= 0)
    {
        // The running thread is prepended so it keeps the CPU after the change.
        if (currentThread == this)
        {
            _schedulingData.SchedulePrepend(DynamicPriority, CurrentCore, this);
        }
        else
        {
            _schedulingData.Schedule(DynamicPriority, CurrentCore, this);
        }
    }

    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
        {
            _schedulingData.Suggest(DynamicPriority, core, this);
        }
    }

    _scheduler.ThreadReselectionRequested = true;
}
/// <summary>
/// Moves a running thread between per-core queues after its affinity mask
/// (and possibly its current core) changed.
/// </summary>
/// <param name="oldAffinityMask">Affinity mask before the change.</param>
/// <param name="oldCore">Core the thread was scheduled on before the change.</param>
private void AdjustSchedulingForNewAffinity(long oldAffinityMask, int oldCore)
{
    if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount)
    {
        return;
    }

    // Remove thread from the old priority queues.
    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (((oldAffinityMask >> core) & 1) != 0)
        {
            if (core == oldCore)
            {
                _schedulingData.Unschedule(DynamicPriority, core, this);
            }
            else
            {
                _schedulingData.Unsuggest(DynamicPriority, core, this);
            }
        }
    }

    // Add thread to the new priority queues.
    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (((AffinityMask >> core) & 1) != 0)
        {
            if (core == CurrentCore)
            {
                _schedulingData.Schedule(DynamicPriority, core, this);
            }
            else
            {
                _schedulingData.Suggest(DynamicPriority, core, this);
            }
        }
    }

    _scheduler.ThreadReselectionRequested = true;
}
/// <summary>
/// Places the thread entry arguments in X0 (argument pointer) and
/// X1 (the thread's own handle).
/// </summary>
public void SetEntryArguments(long argsPtr, int threadHandle)
{
    Context.SetX(0, (ulong)argsPtr);
    Context.SetX(1, (ulong)threadHandle);
}
/// <summary>
/// IKFutureSchedulerObject callback invoked by the time manager when a
/// scheduled timeout expires; resumes the paused thread.
/// </summary>
public void TimeUp()
{
    ReleaseAndResume();
}
/// <summary>
/// Returns the guest stack trace for this thread's current CPU context,
/// as produced by the owner process debugger.
/// </summary>
public string GetGuestStackTrace()
{
    return Owner.Debugger.GetGuestStackTrace(Context);
}
/// <summary>
/// Logs the guest stack trace of this thread at Info level.
/// </summary>
public void PrintGuestStackTrace()
{
    Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
}
/// <summary>
/// Starts the backing host thread. The Interlocked guard makes repeated
/// calls a no-op, so the host thread is started exactly once.
/// </summary>
public void Execute()
{
    if (Interlocked.CompareExchange(ref _hostThreadRunning, 1, 0) == 0)
    {
        HostThread.Start();
    }
}
/// <summary>
/// Entry point of the host thread: runs either the custom host routine or
/// the guest code at the entrypoint, then tears the thread down.
/// </summary>
private void ThreadStart()
{
    // Make the kernel context reachable from this host thread.
    KernelStatic.SetKernelContext(KernelContext);

    if (_customThreadStart != null)
    {
        _customThreadStart();
    }
    else
    {
        // Blocks until the guest thread stops executing.
        Owner.Context.Execute(Context, _entrypoint);
    }

    KernelContext.Scheduler.ExitThread(this);
    KernelContext.Scheduler.RemoveThread(this);

    Context.Dispose();
}
  877. public bool IsCurrentHostThread()
  878. {
  879. return Thread.CurrentThread == HostThread;
  880. }
/// <summary>
/// A thread object is signaled once the thread has exited (see ExitImpl).
/// </summary>
public override bool IsSignaled()
{
    return _hasExited;
}
/// <summary>
/// Final cleanup when the last reference is dropped: frees thread resources
/// and returns the resource-limit slot if Exit() did not already do so.
/// </summary>
protected override void Destroy()
{
    if (_hasBeenInitialized)
    {
        FreeResources();

        // NOTE(review): owned threads are treated as already released here even
        // if Exit() never ran (_hasBeenReleased false) — confirm this matches
        // the intended resource accounting.
        bool released = Owner != null || _hasBeenReleased;

        if (Owner != null)
        {
            Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);

            Owner.DecrementReferenceCount();
        }
        else
        {
            KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
        }
    }
}
/// <summary>
/// Detaches the thread from its owner, frees its TLS slot and releases any
/// threads still waiting on mutexes held by this thread.
/// </summary>
private void FreeResources()
{
    Owner?.RemoveThread(this);

    // _tlsAddress is only non-zero for user threads, which always have an
    // owner, so the short-circuit avoids a null Owner dereference here.
    // NOTE(review): relies on that invariant — confirm.
    if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
    {
        throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
    }

    KernelContext.CriticalSection.Enter();

    // Wake up all threads that may be waiting for a mutex being held by this thread.
    foreach (KThread thread in _mutexWaiters)
    {
        thread.MutexOwner = null;
        thread._preferredCoreOverride = 0;
        thread.ObjSyncResult = KernelResult.InvalidState;

        thread.ReleaseAndResume();
    }

    KernelContext.CriticalSection.Leave();

    Owner?.DecrementThreadCountAndTerminateIfZero();
}
  921. }
  922. }