KThread.cs 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Linq;
  8. using System.Text;
  9. using System.Threading;
  10. namespace Ryujinx.HLE.HOS.Kernel.Threading
  11. {
  12. class KThread : KSynchronizationObject, IKFutureSchedulerObject
  13. {
// Maximum number of objects a single WaitSynchronization call can wait on.
public const int MaxWaitSyncObjects = 64;

// 0/1 flag: set once the host thread has been started (see Execute, via Interlocked).
private int _hostThreadRunning;

// Host OS thread that executes this guest thread's code.
public Thread HostThread { get; private set; }

// Emulated CPU register/state context for this thread.
public ARMeilleure.State.ExecutionContext Context { get; private set; }

// Bitmask of CPU cores this thread is allowed to run on.
public long AffinityMask { get; set; }

// Kernel-unique thread identifier, assigned in Initialize.
public long ThreadUid { get; private set; }

public long TotalTimeRunning { get; set; }

// Object that signaled this thread out of a synchronization wait, if any.
public KSynchronizationObject SignaledObj { get; set; }

// Guest address of the condition variable this thread waits on, if any.
public ulong CondVarAddress { get; set; }

// Guest entrypoint address captured in Initialize.
private ulong _entrypoint;

// Guest address of the mutex this thread is waiting for, if any.
public ulong MutexAddress { get; set; }

// Owning process; null for kernel-internal threads (see Initialize).
public KProcess Owner { get; private set; }

// Guest virtual address of this thread's TLS block (allocated in Initialize for user threads).
private ulong _tlsAddress;
public ulong TlsAddress => _tlsAddress;
public ulong TlsDramAddress { get; private set; }

// Objects/handles currently being waited on (up to MaxWaitSyncObjects entries).
public KSynchronizationObject[] WaitSyncObjects { get; }
public int[] WaitSyncHandles { get; }

public long LastScheduledTime { get; set; }

// Per-core linked list nodes used by the scheduler's queues.
public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

// List currently withholding this thread from scheduling, plus its node in that list.
public LinkedList<KThread> Withholder { get; set; }
public LinkedListNode<KThread> WithholderNode { get; set; }

public LinkedListNode<KThread> ProcessListNode { get; set; }

// Threads waiting on a mutex held by this thread, kept sorted by DynamicPriority
// (see AddToMutexWaitersList); _mutexWaiterNode is this thread's node in its
// mutex owner's list.
private LinkedList<KThread> _mutexWaiters;
private LinkedListNode<KThread> _mutexWaiterNode;

// Thread currently holding the mutex this thread is waiting for.
public KThread MutexOwner { get; private set; }

public int ThreadHandleForUserMutex { get; set; }

// Forced-pause bits that get OR'd into SchedFlags' high nibble (see CombineForcePauseFlags).
private ThreadSchedState _forcePauseFlags;

// Result delivered when a synchronization wait completes or is cancelled.
public KernelResult ObjSyncResult { get; set; }

// Effective priority: BasePriority possibly boosted via priority inheritance.
public int DynamicPriority { get; set; }
public int CurrentCore { get; set; }
public int BasePriority { get; set; }
public int PreferredCore { get; set; }

// Core/affinity overrides, active while _affinityOverrideCount != 0
// (see SetCoreAndAffinityMask).
private long _affinityMaskOverride;
private int _preferredCoreOverride;
#pragma warning disable CS0649
private int _affinityOverrideCount;
#pragma warning restore CS0649

public ThreadSchedState SchedFlags { get; private set; }

// 0/1 termination-request flag, manipulated with Interlocked (see PrepareForTermination).
private int _shallBeTerminated;
public bool ShallBeTerminated { get => _shallBeTerminated != 0; set => _shallBeTerminated = value ? 1 : 0; }

public bool SyncCancelled { get; set; }
public bool WaitingSync { get; set; }

private bool _hasExited;
private bool _hasBeenInitialized;
private bool _hasBeenReleased;

public bool WaitingInArbitration { get; set; }

// Cached scheduler references, captured from KernelContext in the constructor.
private KScheduler _scheduler;
private KSchedulingData _schedulingData;

public long LastPc { get; set; }
  63. public KThread(KernelContext context) : base(context)
  64. {
  65. _scheduler = KernelContext.Scheduler;
  66. _schedulingData = KernelContext.Scheduler.SchedulingData;
  67. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  68. WaitSyncHandles = new int[MaxWaitSyncObjects];
  69. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  70. _mutexWaiters = new LinkedList<KThread>();
  71. }
  72. public KernelResult Initialize(
  73. ulong entrypoint,
  74. ulong argsPtr,
  75. ulong stackTop,
  76. int priority,
  77. int defaultCpuCore,
  78. KProcess owner,
  79. ThreadType type = ThreadType.User,
  80. ThreadStart customHostThreadStart = null)
  81. {
  82. if ((uint)type > 3)
  83. {
  84. throw new ArgumentException($"Invalid thread type \"{type}\".");
  85. }
  86. PreferredCore = defaultCpuCore;
  87. AffinityMask |= 1L << defaultCpuCore;
  88. SchedFlags = type == ThreadType.Dummy
  89. ? ThreadSchedState.Running
  90. : ThreadSchedState.None;
  91. CurrentCore = PreferredCore;
  92. DynamicPriority = priority;
  93. BasePriority = priority;
  94. ObjSyncResult = KernelResult.ThreadNotStarted;
  95. _entrypoint = entrypoint;
  96. if (type == ThreadType.User)
  97. {
  98. if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
  99. {
  100. return KernelResult.OutOfMemory;
  101. }
  102. TlsDramAddress = owner.MemoryManager.GetDramAddressFromVa(_tlsAddress);
  103. MemoryHelper.FillWithZeros(owner.CpuMemory, (long)_tlsAddress, KTlsPageInfo.TlsEntrySize);
  104. }
  105. bool is64Bits;
  106. if (owner != null)
  107. {
  108. Owner = owner;
  109. owner.IncrementReferenceCount();
  110. owner.IncrementThreadCount();
  111. is64Bits = (owner.MmuFlags & 1) != 0;
  112. }
  113. else
  114. {
  115. is64Bits = true;
  116. }
  117. HostThread = new Thread(customHostThreadStart ?? (() => ThreadStart(entrypoint)));
  118. Context = CpuContext.CreateExecutionContext();
  119. bool isAarch32 = (Owner.MmuFlags & 1) == 0;
  120. Context.IsAarch32 = isAarch32;
  121. Context.SetX(0, argsPtr);
  122. if (isAarch32)
  123. {
  124. Context.SetX(13, (uint)stackTop);
  125. }
  126. else
  127. {
  128. Context.SetX(31, stackTop);
  129. }
  130. Context.CntfrqEl0 = 19200000;
  131. Context.Tpidr = (long)_tlsAddress;
  132. owner.SubscribeThreadEventHandlers(Context);
  133. ThreadUid = KernelContext.NewThreadUid();
  134. HostThread.Name = $"HLE.HostThread.{ThreadUid}";
  135. _hasBeenInitialized = true;
  136. if (owner != null)
  137. {
  138. owner.AddThread(this);
  139. if (owner.IsPaused)
  140. {
  141. KernelContext.CriticalSection.Enter();
  142. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  143. {
  144. KernelContext.CriticalSection.Leave();
  145. return KernelResult.Success;
  146. }
  147. _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
  148. CombineForcePauseFlags();
  149. KernelContext.CriticalSection.Leave();
  150. }
  151. }
  152. return KernelResult.Success;
  153. }
/// <summary>
/// Attempts to transition this thread into the Running state.
/// </summary>
/// <returns>
/// Success if the thread was started, InvalidState if it was already started/paused,
/// or ThreadTerminating if a termination request pre-empted the start.
/// </returns>
public KernelResult Start()
{
    // Before the kernel has finished initializing, freshly started threads are
    // held with the kernel-init pause flag.
    if (!KernelContext.KernelInitialized)
    {
        KernelContext.CriticalSection.Enter();

        if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
        {
            _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;

            CombineForcePauseFlags();
        }

        KernelContext.CriticalSection.Leave();
    }

    KernelResult result = KernelResult.ThreadTerminating;

    KernelContext.CriticalSection.Enter();

    if (!ShallBeTerminated)
    {
        KThread currentThread = KernelContext.Scheduler.GetCurrentThread();

        // Retry until either thread is terminating, the target is found already
        // started, or the current thread's pending pause flags have been applied.
        while (SchedFlags != ThreadSchedState.TerminationPending &&
            currentThread.SchedFlags != ThreadSchedState.TerminationPending &&
            !currentThread.ShallBeTerminated)
        {
            // A non-zero low nibble means the thread was already started.
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
            {
                result = KernelResult.InvalidState;

                break;
            }

            if (currentThread._forcePauseFlags == ThreadSchedState.None)
            {
                if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                {
                    CombineForcePauseFlags();
                }

                SetNewSchedFlags(ThreadSchedState.Running);

                result = KernelResult.Success;

                break;
            }
            else
            {
                // The current thread has a pending pause: apply it, then briefly
                // drop the critical section so it can actually take effect.
                currentThread.CombineForcePauseFlags();

                KernelContext.CriticalSection.Leave();
                KernelContext.CriticalSection.Enter();

                if (currentThread.ShallBeTerminated)
                {
                    break;
                }
            }
        }
    }

    KernelContext.CriticalSection.Leave();

    return result;
}
/// <summary>
/// Exits the thread: releases its resource-limit slot, clears forced pauses,
/// marks it terminated/signaled, and drops this object's reference count.
/// </summary>
public void Exit()
{
    // TODO: Debug event.

    if (Owner != null)
    {
        Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);

        // Remember the release so Destroy does not release the slot twice.
        _hasBeenReleased = true;
    }

    KernelContext.CriticalSection.Enter();

    // Forced pauses no longer matter for a dying thread.
    _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;

    ExitImpl();

    KernelContext.CriticalSection.Leave();

    DecrementReferenceCount();
}
/// <summary>
/// Requests termination of this thread (idempotent via a CAS on the termination
/// flag) and wakes it if it is blocked.
/// </summary>
/// <returns>The low nibble of the scheduling flags after the request.</returns>
public ThreadSchedState PrepareForTermination()
{
    KernelContext.CriticalSection.Enter();

    ThreadSchedState result;

    // Only the first caller (flag 0 -> 1) performs the termination work.
    if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
    {
        if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
        {
            // Never started: go straight to termination pending.
            SchedFlags = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                // Drop the forced-pause high bits so the thread can run to its death.
                _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                ThreadSchedState oldSchedFlags = SchedFlags;

                SchedFlags &= ThreadSchedState.LowMask;

                AdjustScheduling(oldSchedFlags);
            }

            // Boost low-priority threads so the termination is processed promptly.
            if (BasePriority >= 0x10)
            {
                SetPriority(0xF);
            }

            if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
            {
                // TODO: GIC distributor stuffs (sgir changes ect)
            }

            // Cancel any in-progress synchronization wait with a terminating result.
            SignaledObj = null;
            ObjSyncResult = KernelResult.ThreadTerminating;

            ReleaseAndResume();
        }
    }

    result = SchedFlags;

    KernelContext.CriticalSection.Leave();

    return result & ThreadSchedState.LowMask;
}
  255. public void Terminate()
  256. {
  257. ThreadSchedState state = PrepareForTermination();
  258. if (state != ThreadSchedState.TerminationPending)
  259. {
  260. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  261. }
  262. }
/// <summary>
/// Runs after every syscall: exits the thread if termination was requested,
/// otherwise (re)applies any pending forced pauses, looping until the thread
/// is cleanly Running or has exited.
/// </summary>
public void HandlePostSyscall()
{
    ThreadSchedState state;

    do
    {
        if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
        {
            KernelContext.Scheduler.ExitThread(this);
            Exit();

            // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
            break;
        }

        KernelContext.CriticalSection.Enter();

        if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
        {
            state = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                CombineForcePauseFlags();
            }

            state = ThreadSchedState.Running;
        }

        KernelContext.CriticalSection.Leave();
    } while (state == ThreadSchedState.TerminationPending);
}
// Marks the thread as terminating and signals waiters (e.g. Terminate's WaitFor).
// Must run with the kernel critical section logic; it takes the lock itself.
private void ExitImpl()
{
    KernelContext.CriticalSection.Enter();

    SetNewSchedFlags(ThreadSchedState.TerminationPending);

    // Set before Signal so IsSignaled observes the exited state.
    _hasExited = true;

    Signal();

    KernelContext.CriticalSection.Leave();
}
  299. public KernelResult Sleep(long timeout)
  300. {
  301. KernelContext.CriticalSection.Enter();
  302. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  303. {
  304. KernelContext.CriticalSection.Leave();
  305. return KernelResult.ThreadTerminating;
  306. }
  307. SetNewSchedFlags(ThreadSchedState.Paused);
  308. if (timeout > 0)
  309. {
  310. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  311. }
  312. KernelContext.CriticalSection.Leave();
  313. if (timeout > 0)
  314. {
  315. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  316. }
  317. return 0;
  318. }
/// <summary>
/// Yields the CPU to another thread of the same priority on the same core
/// (svcSleepThread with timeout 0 semantics).
/// </summary>
public void Yield()
{
    KernelContext.CriticalSection.Enter();

    // Not running anymore (e.g. pause/termination raced in): just context switch.
    if (SchedFlags != ThreadSchedState.Running)
    {
        KernelContext.CriticalSection.Leave();

        KernelContext.Scheduler.ContextSwitch();

        return;
    }

    if (DynamicPriority < KScheduler.PrioritiesCount)
    {
        // Move current thread to the end of the queue.
        _schedulingData.Reschedule(DynamicPriority, CurrentCore, this);
    }

    _scheduler.ThreadReselectionRequested = true;

    KernelContext.CriticalSection.Leave();

    KernelContext.Scheduler.ContextSwitch();
}
/// <summary>
/// Yields the CPU and tries to pull a suitable suggested thread from another
/// core onto this core (svcSleepThread with timeout -1 semantics).
/// </summary>
public void YieldWithLoadBalancing()
{
    KernelContext.CriticalSection.Enter();

    if (SchedFlags != ThreadSchedState.Running)
    {
        KernelContext.CriticalSection.Leave();

        KernelContext.Scheduler.ContextSwitch();

        return;
    }

    int prio = DynamicPriority;
    int core = CurrentCore;

    KThread nextThreadOnCurrentQueue = null;

    if (DynamicPriority < KScheduler.PrioritiesCount)
    {
        // Move current thread to the end of the queue.
        _schedulingData.Reschedule(prio, core, this);

        Func<KThread, bool> predicate = x => x.DynamicPriority == prio;

        nextThreadOnCurrentQueue = _schedulingData.ScheduledThreads(core).FirstOrDefault(predicate);
    }

    IEnumerable<KThread> SuitableCandidates()
    {
        foreach (KThread thread in _schedulingData.SuggestedThreads(core))
        {
            int srcCore = thread.CurrentCore;

            if (srcCore >= 0)
            {
                KThread selectedSrcCore = _scheduler.CoreContexts[srcCore].SelectedThread;

                // Skip candidates that are currently selected on their own core,
                // or whose core's selected thread has a very high (< 2) priority.
                if (selectedSrcCore == thread || ((selectedSrcCore?.DynamicPriority ?? 2) < 2))
                {
                    continue;
                }
            }

            // If the candidate was scheduled after the current thread, then it's not worth it,
            // unless the priority is higher than the current one.
            // NOTE(review): nextThreadOnCurrentQueue is dereferenced here but stays
            // null when DynamicPriority >= PrioritiesCount above — confirm callers
            // only reach this path with a schedulable priority.
            if (nextThreadOnCurrentQueue.LastScheduledTime >= thread.LastScheduledTime ||
                nextThreadOnCurrentQueue.DynamicPriority < thread.DynamicPriority)
            {
                yield return thread;
            }
        }
    }

    // First candidate with priority at least as good as ours gets migrated here.
    KThread dst = SuitableCandidates().FirstOrDefault(x => x.DynamicPriority <= prio);

    if (dst != null)
    {
        _schedulingData.TransferToCore(dst.DynamicPriority, core, dst);

        _scheduler.ThreadReselectionRequested = true;
    }

    if (this != nextThreadOnCurrentQueue)
    {
        _scheduler.ThreadReselectionRequested = true;
    }

    KernelContext.CriticalSection.Leave();

    KernelContext.Scheduler.ContextSwitch();
}
/// <summary>
/// Yields the CPU, removing this thread from its core (core -1) and, if the core
/// queue would go empty, migrating a suggested thread here
/// (svcSleepThread with timeout -2 semantics).
/// </summary>
public void YieldAndWaitForLoadBalancing()
{
    KernelContext.CriticalSection.Enter();

    if (SchedFlags != ThreadSchedState.Running)
    {
        KernelContext.CriticalSection.Leave();

        KernelContext.Scheduler.ContextSwitch();

        return;
    }

    int core = CurrentCore;

    // Detach this thread from its core; it becomes runnable anywhere (-1).
    _schedulingData.TransferToCore(DynamicPriority, -1, this);

    KThread selectedThread = null;

    // Only steal work if nothing is left scheduled on this core.
    if (!_schedulingData.ScheduledThreads(core).Any())
    {
        foreach (KThread thread in _schedulingData.SuggestedThreads(core))
        {
            if (thread.CurrentCore < 0)
            {
                continue;
            }

            KThread firstCandidate = _schedulingData.ScheduledThreads(thread.CurrentCore).FirstOrDefault();

            // Don't steal a thread that is first in line on its own core.
            if (firstCandidate == thread)
            {
                continue;
            }

            if (firstCandidate == null || firstCandidate.DynamicPriority >= 2)
            {
                _schedulingData.TransferToCore(thread.DynamicPriority, core, thread);

                selectedThread = thread;
            }

            // Only the first eligible suggestion is considered.
            break;
        }
    }

    if (selectedThread != this)
    {
        _scheduler.ThreadReselectionRequested = true;
    }

    KernelContext.CriticalSection.Leave();

    KernelContext.Scheduler.ContextSwitch();
}
/// <summary>
/// Sets the thread's base priority; the effective (dynamic) priority is then
/// recomputed, taking priority inheritance from mutex waiters into account.
/// </summary>
public void SetPriority(int priority)
{
    KernelContext.CriticalSection.Enter();

    BasePriority = priority;

    UpdatePriorityInheritance();

    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Pauses or unpauses the thread via the thread-pause force flag.
/// </summary>
/// <param name="pause">True to pause, false to resume.</param>
/// <returns>
/// Success, or InvalidState if the thread is in a non-pausable state or the
/// requested transition is redundant (already paused / already running).
/// </returns>
public KernelResult SetActivity(bool pause)
{
    KernelResult result = KernelResult.Success;

    KernelContext.CriticalSection.Enter();

    ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

    if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
    {
        KernelContext.CriticalSection.Leave();

        return KernelResult.InvalidState;
    }

    // NOTE(review): the critical section is entered a second time here (and left
    // twice below), mirroring the nested locking of the original kernel —
    // presumably the lock is reentrant; confirm CriticalSection supports this.
    KernelContext.CriticalSection.Enter();

    if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
    {
        if (pause)
        {
            // Pause, the force pause flag should be clear (thread is NOT paused).
            if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
            {
                _forcePauseFlags |= ThreadSchedState.ThreadPauseFlag;

                CombineForcePauseFlags();
            }
            else
            {
                result = KernelResult.InvalidState;
            }
        }
        else
        {
            // Unpause, the force pause flag should be set (thread is paused).
            if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
            {
                ThreadSchedState oldForcePauseFlags = _forcePauseFlags;

                _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                // If no other force-pause reason remains, restore normal scheduling.
                if ((oldForcePauseFlags & ~ThreadSchedState.ThreadPauseFlag) == ThreadSchedState.None)
                {
                    ThreadSchedState oldSchedFlags = SchedFlags;

                    SchedFlags &= ThreadSchedState.LowMask;

                    AdjustScheduling(oldSchedFlags);
                }
            }
            else
            {
                result = KernelResult.InvalidState;
            }
        }
    }

    KernelContext.CriticalSection.Leave();
    KernelContext.CriticalSection.Leave();

    return result;
}
/// <summary>
/// Cancels an in-progress synchronization wait (svcCancelSynchronization):
/// either flags the cancellation for later, or wakes the thread immediately
/// with a Cancelled result.
/// </summary>
public void CancelSynchronization()
{
    KernelContext.CriticalSection.Enter();

    // Not blocked in a sync wait right now: remember the cancellation so the
    // next wait observes it.
    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
    {
        SyncCancelled = true;
    }
    else if (Withholder != null)
    {
        // Thread is parked in a withholder list: release it and mark cancelled.
        Withholder.Remove(WithholderNode);

        SetNewSchedFlags(ThreadSchedState.Running);

        Withholder = null;

        SyncCancelled = true;
    }
    else
    {
        // Actively waiting: wake it with a Cancelled result, flag consumed.
        SignaledObj = null;
        ObjSyncResult = KernelResult.Cancelled;

        SetNewSchedFlags(ThreadSchedState.Running);

        SyncCancelled = false;
    }

    KernelContext.CriticalSection.Leave();
}
  511. public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
  512. {
  513. KernelContext.CriticalSection.Enter();
  514. bool useOverride = _affinityOverrideCount != 0;
  515. // The value -3 is "do not change the preferred core".
  516. if (newCore == -3)
  517. {
  518. newCore = useOverride ? _preferredCoreOverride : PreferredCore;
  519. if ((newAffinityMask & (1 << newCore)) == 0)
  520. {
  521. KernelContext.CriticalSection.Leave();
  522. return KernelResult.InvalidCombination;
  523. }
  524. }
  525. if (useOverride)
  526. {
  527. _preferredCoreOverride = newCore;
  528. _affinityMaskOverride = newAffinityMask;
  529. }
  530. else
  531. {
  532. long oldAffinityMask = AffinityMask;
  533. PreferredCore = newCore;
  534. AffinityMask = newAffinityMask;
  535. if (oldAffinityMask != newAffinityMask)
  536. {
  537. int oldCore = CurrentCore;
  538. if (CurrentCore >= 0 && ((AffinityMask >> CurrentCore) & 1) == 0)
  539. {
  540. if (PreferredCore < 0)
  541. {
  542. CurrentCore = HighestSetCore(AffinityMask);
  543. }
  544. else
  545. {
  546. CurrentCore = PreferredCore;
  547. }
  548. }
  549. AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
  550. }
  551. }
  552. KernelContext.CriticalSection.Leave();
  553. return KernelResult.Success;
  554. }
  555. private static int HighestSetCore(long mask)
  556. {
  557. for (int core = KScheduler.CpuCoresCount - 1; core >= 0; core--)
  558. {
  559. if (((mask >> core) & 1) != 0)
  560. {
  561. return core;
  562. }
  563. }
  564. return -1;
  565. }
// Merges the pending force-pause bits into the high nibble of SchedFlags,
// keeping the current low-nibble state, then updates the scheduler queues.
private void CombineForcePauseFlags()
{
    ThreadSchedState oldFlags = SchedFlags;
    ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

    SchedFlags = lowNibble | _forcePauseFlags;

    AdjustScheduling(oldFlags);
}
// Replaces the low nibble (run state) of SchedFlags, preserving the force-pause
// high bits, and updates the scheduler queues if the state actually changed.
private void SetNewSchedFlags(ThreadSchedState newFlags)
{
    KernelContext.CriticalSection.Enter();

    ThreadSchedState oldFlags = SchedFlags;

    SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;

    if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
    {
        AdjustScheduling(oldFlags);
    }

    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Wakes a paused thread: removes it from any withholder list and sets it
/// back to Running. No-op if the thread is not in the Paused state.
/// </summary>
public void ReleaseAndResume()
{
    KernelContext.CriticalSection.Enter();

    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
    {
        if (Withholder != null)
        {
            Withholder.Remove(WithholderNode);

            SetNewSchedFlags(ThreadSchedState.Running);

            Withholder = null;
        }
        else
        {
            SetNewSchedFlags(ThreadSchedState.Running);
        }
    }

    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Forces the thread's run state (low nibble) to the given value, preserving
/// the force-pause high bits, and updates the scheduler queues.
/// </summary>
public void Reschedule(ThreadSchedState newFlags)
{
    KernelContext.CriticalSection.Enter();

    ThreadSchedState oldFlags = SchedFlags;

    SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
        (newFlags & ThreadSchedState.LowMask);

    AdjustScheduling(oldFlags);

    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Registers a thread as waiting on a mutex held by this thread and propagates
/// priority inheritance (this thread may inherit the requester's priority).
/// </summary>
public void AddMutexWaiter(KThread requester)
{
    AddToMutexWaitersList(requester);

    requester.MutexOwner = this;

    UpdatePriorityInheritance();
}
/// <summary>
/// Removes a thread from this thread's mutex waiter list (if it is linked into
/// one) and recomputes this thread's inherited priority.
/// </summary>
public void RemoveMutexWaiter(KThread thread)
{
    // Only unlink if the node is actually attached to a list.
    if (thread._mutexWaiterNode?.List != null)
    {
        _mutexWaiters.Remove(thread._mutexWaiterNode);
    }

    thread.MutexOwner = null;

    UpdatePriorityInheritance();
}
/// <summary>
/// Hands a mutex off to the highest-priority waiter: the first waiter on the
/// given address becomes the new owner, and any remaining waiters on that
/// address are re-parented to it.
/// </summary>
/// <param name="mutexAddress">Guest address of the mutex being released.</param>
/// <param name="count">Number of threads that were waiting on this mutex.</param>
/// <returns>The new mutex owner, or null if no thread was waiting on it.</returns>
public KThread RelinquishMutex(ulong mutexAddress, out int count)
{
    count = 0;

    // Nothing waiting on any mutex held by this thread.
    if (_mutexWaiters.First == null)
    {
        return null;
    }

    KThread newMutexOwner = null;

    LinkedListNode<KThread> currentNode = _mutexWaiters.First;

    do
    {
        // Skip all threads that are not waiting for this mutex.
        while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
        {
            currentNode = currentNode.Next;
        }

        if (currentNode == null)
        {
            break;
        }

        // Capture the next node before unlinking the current one.
        LinkedListNode<KThread> nextNode = currentNode.Next;

        _mutexWaiters.Remove(currentNode);

        currentNode.Value.MutexOwner = newMutexOwner;

        if (newMutexOwner != null)
        {
            // New owner was already selected, re-insert on new owner list.
            newMutexOwner.AddToMutexWaitersList(currentNode.Value);
        }
        else
        {
            // New owner not selected yet, use current thread.
            newMutexOwner = currentNode.Value;
        }

        count++;

        currentNode = nextNode;
    }
    while (currentNode != null);

    if (newMutexOwner != null)
    {
        // Both this thread (lost waiters) and the new owner (gained waiters)
        // need their inherited priorities recomputed.
        UpdatePriorityInheritance();

        newMutexOwner.UpdatePriorityInheritance();
    }

    return newMutexOwner;
}
// Recomputes this thread's dynamic priority from its base priority and the
// highest-priority mutex waiter, and propagates the change up the chain of
// mutex owners (recursively, via the owner's own UpdatePriorityInheritance).
private void UpdatePriorityInheritance()
{
    // If any of the threads waiting for the mutex has
    // higher priority than the current thread, then
    // the current thread inherits that priority.
    int highestPriority = BasePriority;

    // The waiter list is sorted by DynamicPriority, so only the head matters.
    if (_mutexWaiters.First != null)
    {
        int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;

        if (waitingDynamicPriority < highestPriority)
        {
            highestPriority = waitingDynamicPriority;
        }
    }

    if (highestPriority != DynamicPriority)
    {
        int oldPriority = DynamicPriority;

        DynamicPriority = highestPriority;

        AdjustSchedulingForNewPriority(oldPriority);

        if (MutexOwner != null)
        {
            // Remove and re-insert to ensure proper sorting based on new priority.
            MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);

            MutexOwner.AddToMutexWaitersList(this);

            MutexOwner.UpdatePriorityInheritance();
        }
    }
}
  698. private void AddToMutexWaitersList(KThread thread)
  699. {
  700. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  701. int currentPriority = thread.DynamicPriority;
  702. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  703. {
  704. nextPrio = nextPrio.Next;
  705. }
  706. if (nextPrio != null)
  707. {
  708. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  709. }
  710. else
  711. {
  712. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  713. }
  714. }
// Moves the thread in/out of the scheduler's per-core queues after a SchedFlags
// change: scheduled on its current core, suggested on every other core allowed
// by its affinity mask. Only Running <-> not-Running transitions matter.
private void AdjustScheduling(ThreadSchedState oldFlags)
{
    if (oldFlags == SchedFlags)
    {
        return;
    }

    if (oldFlags == ThreadSchedState.Running)
    {
        // Was running, now it's stopped.
        if (CurrentCore >= 0)
        {
            _schedulingData.Unschedule(DynamicPriority, CurrentCore, this);
        }

        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
            {
                _schedulingData.Unsuggest(DynamicPriority, core, this);
            }
        }
    }
    else if (SchedFlags == ThreadSchedState.Running)
    {
        // Was stopped, now it's running.
        if (CurrentCore >= 0)
        {
            _schedulingData.Schedule(DynamicPriority, CurrentCore, this);
        }

        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
            {
                _schedulingData.Suggest(DynamicPriority, core, this);
            }
        }
    }

    _scheduler.ThreadReselectionRequested = true;
}
// Re-files the thread into the scheduler queues after a priority change:
// removed from the old-priority queues, re-inserted at the new priority.
// The currently-running thread is prepended so it keeps its turn.
private void AdjustSchedulingForNewPriority(int oldPriority)
{
    // Only threads that are actually running occupy scheduler queues.
    if (SchedFlags != ThreadSchedState.Running)
    {
        return;
    }

    // Remove thread from the old priority queues.
    if (CurrentCore >= 0)
    {
        _schedulingData.Unschedule(oldPriority, CurrentCore, this);
    }

    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
        {
            _schedulingData.Unsuggest(oldPriority, core, this);
        }
    }

    // Add thread to the new priority queues.
    KThread currentThread = _scheduler.GetCurrentThread();

    if (CurrentCore >= 0)
    {
        if (currentThread == this)
        {
            _schedulingData.SchedulePrepend(DynamicPriority, CurrentCore, this);
        }
        else
        {
            _schedulingData.Schedule(DynamicPriority, CurrentCore, this);
        }
    }

    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
        {
            _schedulingData.Suggest(DynamicPriority, core, this);
        }
    }

    _scheduler.ThreadReselectionRequested = true;
}
// Re-files the thread into the scheduler queues after an affinity change:
// for each core in the old mask it was scheduled (old core) or suggested
// (other cores); the same split is applied for the new mask/current core.
private void AdjustSchedulingForNewAffinity(long oldAffinityMask, int oldCore)
{
    if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount)
    {
        return;
    }

    // Remove thread from the old priority queues.
    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (((oldAffinityMask >> core) & 1) != 0)
        {
            if (core == oldCore)
            {
                _schedulingData.Unschedule(DynamicPriority, core, this);
            }
            else
            {
                _schedulingData.Unsuggest(DynamicPriority, core, this);
            }
        }
    }

    // Add thread to the new priority queues.
    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (((AffinityMask >> core) & 1) != 0)
        {
            if (core == CurrentCore)
            {
                _schedulingData.Schedule(DynamicPriority, core, this);
            }
            else
            {
                _schedulingData.Suggest(DynamicPriority, core, this);
            }
        }
    }

    _scheduler.ThreadReselectionRequested = true;
}
/// <summary>
/// Writes the guest entrypoint arguments into the CPU context:
/// X0 = argument pointer, X1 = the thread's own handle.
/// </summary>
public void SetEntryArguments(long argsPtr, int threadHandle)
{
    Context.SetX(0, (ulong)argsPtr);
    Context.SetX(1, (ulong)threadHandle);
}
/// <summary>
/// IKFutureSchedulerObject callback invoked when a scheduled timeout expires
/// (see Sleep); resumes the thread if it is still paused.
/// </summary>
public void TimeUp()
{
    ReleaseAndResume();
}
/// <summary>
/// Returns a formatted guest stack trace for this thread's CPU context,
/// produced by the owner process's debugger.
/// </summary>
public string GetGuestStackTrace()
{
    return Owner.Debugger.GetGuestStackTrace(Context);
}
  844. public void PrintGuestStackTrace()
  845. {
  846. StringBuilder trace = new StringBuilder();
  847. trace.AppendLine("Guest stack trace:");
  848. trace.AppendLine(GetGuestStackTrace());
  849. Logger.PrintInfo(LogClass.Cpu, trace.ToString());
  850. }
  851. public void Execute()
  852. {
  853. if (Interlocked.CompareExchange(ref _hostThreadRunning, 1, 0) == 0)
  854. {
  855. HostThread.Start();
  856. }
  857. }
// Host thread entrypoint for guest execution: runs the guest code at the given
// entrypoint, then performs thread exit cleanup and disposes the CPU context.
private void ThreadStart(ulong entrypoint)
{
    Owner.CpuContext.Execute(Context, entrypoint);

    ThreadExit();

    Context.Dispose();
}
// Detaches the thread from the scheduler once guest execution has finished.
private void ThreadExit()
{
    KernelContext.Scheduler.ExitThread(this);
    KernelContext.Scheduler.RemoveThread(this);
}
  869. public bool IsCurrentHostThread()
  870. {
  871. return Thread.CurrentThread == HostThread;
  872. }
  873. public override bool IsSignaled()
  874. {
  875. return _hasExited;
  876. }
/// <summary>
/// Final cleanup when the object's reference count reaches zero: frees thread
/// resources and returns the resource-limit slot (owned threads to the owner's
/// limit, kernel threads to the global limit).
/// </summary>
protected override void Destroy()
{
    if (_hasBeenInitialized)
    {
        FreeResources();

        // NOTE(review): `released` is always true when Owner != null, so the
        // owner branch always passes 0 as the extra count — only the ownerless
        // branch depends on _hasBeenReleased. Confirm this mirrors HOS behavior.
        bool released = Owner != null || _hasBeenReleased;

        if (Owner != null)
        {
            Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);

            Owner.DecrementReferenceCount();
        }
        else
        {
            KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
        }
    }
}
// Releases everything the thread still holds: its slot in the owner process,
// its TLS block, and every thread still waiting on a mutex it owned.
private void FreeResources()
{
    Owner?.RemoveThread(this);

    // _tlsAddress is only non-zero for user threads, which always have an
    // owner (see Initialize), so the Owner dereference is safe here.
    if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
    {
        throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
    }

    KernelContext.CriticalSection.Enter();

    // Wake up all threads that may be waiting for a mutex being held by this thread.
    foreach (KThread thread in _mutexWaiters)
    {
        thread.MutexOwner = null;
        thread._preferredCoreOverride = 0;
        thread.ObjSyncResult = KernelResult.InvalidState;

        thread.ReleaseAndResume();
    }

    KernelContext.CriticalSection.Leave();

    Owner?.DecrementThreadCountAndTerminateIfZero();
}
  913. }
  914. }