KThread.cs 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Linq;
  8. using System.Text;
  9. using System.Threading;
  10. namespace Ryujinx.HLE.HOS.Kernel.Threading
  11. {
    /// <summary>
    /// Kernel thread control block: owns the host thread and emulated CPU context
    /// backing a guest thread, plus its scheduling, synchronization and mutex state.
    /// </summary>
    class KThread : KSynchronizationObject, IKFutureSchedulerObject
    {
        // Maximum number of objects a single multi-object wait may reference.
        public const int MaxWaitSyncObjects = 64;

        // 0/1 flag flipped with Interlocked so Execute() starts the host thread only once.
        private int _hostThreadRunning;

        public Thread HostThread { get; private set; }

        // Emulated CPU register/context state for this guest thread.
        public ARMeilleure.State.ExecutionContext Context { get; private set; }

        // Bitmask of cores this thread may run on (bit N = core N).
        public long AffinityMask { get; set; }

        public long ThreadUid { get; private set; }

        public long TotalTimeRunning { get; set; }

        // Object that satisfied the last synchronization wait, if any.
        public KSynchronizationObject SignaledObj { get; set; }

        // Guest addresses used by condition variable / mutex arbitration.
        public ulong CondVarAddress { get; set; }

        private ulong _entrypoint;

        public ulong MutexAddress { get; set; }

        public KProcess Owner { get; private set; }

        // Thread local storage slot allocated from the owner process.
        private ulong _tlsAddress;

        public ulong TlsAddress => _tlsAddress;

        public ulong TlsDramAddress { get; private set; }

        // Objects/handles of an in-flight multi-object wait (up to MaxWaitSyncObjects).
        public KSynchronizationObject[] WaitSyncObjects { get; }

        public int[] WaitSyncHandles { get; }

        public long LastScheduledTime { get; set; }

        // Per-core linked-list nodes used by the scheduler queues.
        public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

        // List (and node) holding this thread while it is withheld from scheduling.
        public LinkedList<KThread> Withholder { get; set; }

        public LinkedListNode<KThread> WithholderNode { get; set; }

        public LinkedListNode<KThread> ProcessListNode { get; set; }

        // Threads waiting on a mutex owned by this thread, kept sorted by dynamic priority.
        private LinkedList<KThread> _mutexWaiters;

        private LinkedListNode<KThread> _mutexWaiterNode;

        // Thread currently holding the mutex this thread waits for, if any.
        public KThread MutexOwner { get; private set; }

        public int ThreadHandleForUserMutex { get; set; }

        // Pause bits (process/thread/kernel-init pause) OR'ed into SchedFlags' high part.
        private ThreadSchedState _forcePauseFlags;

        public KernelResult ObjSyncResult { get; set; }

        // Effective priority: BasePriority possibly boosted by priority inheritance.
        public int DynamicPriority { get; set; }

        public int CurrentCore { get; set; }

        public int BasePriority { get; set; }

        public int PreferredCore { get; set; }

        // Values applied instead of the real core/mask while an affinity override is active.
        private long _affinityMaskOverride;
        private int _preferredCoreOverride;
#pragma warning disable CS0649
        private int _affinityOverrideCount;
#pragma warning restore CS0649

        public ThreadSchedState SchedFlags { get; private set; }

        // Stored as int (not bool) so it can be toggled atomically with Interlocked.
        private int _shallBeTerminated;

        public bool ShallBeTerminated { get => _shallBeTerminated != 0; set => _shallBeTerminated = value ? 1 : 0; }

        public bool SyncCancelled { get; set; }
        public bool WaitingSync { get; set; }

        private bool _hasExited;
        private bool _hasBeenInitialized;
        private bool _hasBeenReleased;

        public bool WaitingInArbitration { get; set; }

        private KScheduler _scheduler;
        private KSchedulingData _schedulingData;

        public long LastPc { get; set; }
  63. public KThread(KernelContext context) : base(context)
  64. {
  65. _scheduler = KernelContext.Scheduler;
  66. _schedulingData = KernelContext.Scheduler.SchedulingData;
  67. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  68. WaitSyncHandles = new int[MaxWaitSyncObjects];
  69. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  70. _mutexWaiters = new LinkedList<KThread>();
  71. }
  72. public KernelResult Initialize(
  73. ulong entrypoint,
  74. ulong argsPtr,
  75. ulong stackTop,
  76. int priority,
  77. int defaultCpuCore,
  78. KProcess owner,
  79. ThreadType type = ThreadType.User,
  80. ThreadStart customHostThreadStart = null)
  81. {
  82. if ((uint)type > 3)
  83. {
  84. throw new ArgumentException($"Invalid thread type \"{type}\".");
  85. }
  86. PreferredCore = defaultCpuCore;
  87. AffinityMask |= 1L << defaultCpuCore;
  88. SchedFlags = type == ThreadType.Dummy
  89. ? ThreadSchedState.Running
  90. : ThreadSchedState.None;
  91. CurrentCore = PreferredCore;
  92. DynamicPriority = priority;
  93. BasePriority = priority;
  94. ObjSyncResult = KernelResult.ThreadNotStarted;
  95. _entrypoint = entrypoint;
  96. if (type == ThreadType.User)
  97. {
  98. if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
  99. {
  100. return KernelResult.OutOfMemory;
  101. }
  102. TlsDramAddress = owner.MemoryManager.GetDramAddressFromVa(_tlsAddress);
  103. MemoryHelper.FillWithZeros(owner.CpuMemory, (long)_tlsAddress, KTlsPageInfo.TlsEntrySize);
  104. }
  105. bool is64Bits;
  106. if (owner != null)
  107. {
  108. Owner = owner;
  109. owner.IncrementReferenceCount();
  110. owner.IncrementThreadCount();
  111. is64Bits = (owner.MmuFlags & 1) != 0;
  112. }
  113. else
  114. {
  115. is64Bits = true;
  116. }
  117. HostThread = new Thread(customHostThreadStart ?? (() => ThreadStart(entrypoint)));
  118. Context = CpuContext.CreateExecutionContext();
  119. bool isAarch32 = (Owner.MmuFlags & 1) == 0;
  120. Context.IsAarch32 = isAarch32;
  121. Context.SetX(0, argsPtr);
  122. if (isAarch32)
  123. {
  124. Context.SetX(13, (uint)stackTop);
  125. }
  126. else
  127. {
  128. Context.SetX(31, stackTop);
  129. }
  130. Context.CntfrqEl0 = 19200000;
  131. Context.Tpidr = (long)_tlsAddress;
  132. owner.SubscribeThreadEventHandlers(Context);
  133. ThreadUid = KernelContext.NewThreadUid();
  134. HostThread.Name = $"HLE.HostThread.{ThreadUid}";
  135. _hasBeenInitialized = true;
  136. if (owner != null)
  137. {
  138. owner.AddThread(this);
  139. if (owner.IsPaused)
  140. {
  141. KernelContext.CriticalSection.Enter();
  142. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  143. {
  144. KernelContext.CriticalSection.Leave();
  145. return KernelResult.Success;
  146. }
  147. _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
  148. CombineForcePauseFlags();
  149. KernelContext.CriticalSection.Leave();
  150. }
  151. }
  152. return KernelResult.Success;
  153. }
        /// <summary>
        /// Transitions the thread from its initial state to Running, letting any
        /// pending force-pause on the calling thread take effect first.
        /// </summary>
        /// <returns>
        /// Success when started, InvalidState if it had already left the initial
        /// state, or ThreadTerminating when a termination request interrupts.
        /// </returns>
        public KernelResult Start()
        {
            if (!KernelContext.KernelInitialized)
            {
                KernelContext.CriticalSection.Enter();

                if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
                {
                    // Keep early threads paused until kernel initialization completes.
                    _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;

                    CombineForcePauseFlags();
                }

                KernelContext.CriticalSection.Leave();
            }

            KernelResult result = KernelResult.ThreadTerminating;

            KernelContext.CriticalSection.Enter();

            if (!ShallBeTerminated)
            {
                KThread currentThread = KernelContext.Scheduler.GetCurrentThread();

                // Loop until either thread is terminating; when the caller itself is
                // force-paused, the critical section is released below so the pause
                // can be serviced before retrying.
                while (SchedFlags != ThreadSchedState.TerminationPending &&
                       currentThread.SchedFlags != ThreadSchedState.TerminationPending &&
                       !currentThread.ShallBeTerminated)
                {
                    // Already started (low nibble no longer in the initial state).
                    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
                    {
                        result = KernelResult.InvalidState;

                        break;
                    }

                    if (currentThread._forcePauseFlags == ThreadSchedState.None)
                    {
                        if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                        {
                            CombineForcePauseFlags();
                        }

                        SetNewSchedFlags(ThreadSchedState.Running);

                        result = KernelResult.Success;

                        break;
                    }
                    else
                    {
                        // Apply the caller's pending pause, then yield the critical
                        // section so that pause can actually take effect.
                        currentThread.CombineForcePauseFlags();

                        KernelContext.CriticalSection.Leave();
                        KernelContext.CriticalSection.Enter();

                        if (currentThread.ShallBeTerminated)
                        {
                            break;
                        }
                    }
                }
            }

            KernelContext.CriticalSection.Leave();

            return result;
        }
        /// <summary>
        /// Runs when the thread exits: returns its resource-limit slot, clears the
        /// force-pause bits, marks it terminated and drops the self reference.
        /// </summary>
        public void Exit()
        {
            // TODO: Debug event.

            if (Owner != null)
            {
                // Give the thread slot back to the owning process' resource limit.
                Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);

                _hasBeenReleased = true;
            }

            KernelContext.CriticalSection.Enter();

            // A terminating thread can no longer be force-paused.
            _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;

            ExitImpl();

            KernelContext.CriticalSection.Leave();

            DecrementReferenceCount();
        }
        /// <summary>
        /// Flags the thread for termination and, when it has already started, cancels
        /// any blocking wait so the request can be observed. Idempotent: only the
        /// first caller performs the transition.
        /// </summary>
        /// <returns>The low (scheduling state) nibble of the flags after the request.</returns>
        public ThreadSchedState PrepareForTermination()
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState result;

            // Atomic 0 -> 1: only the first termination request does the work below.
            if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
            {
                if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
                {
                    // Never started: go straight to TerminationPending.
                    SchedFlags = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        // Drop the pause so the thread can run to its termination point.
                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                        ThreadSchedState oldSchedFlags = SchedFlags;

                        SchedFlags &= ThreadSchedState.LowMask;

                        AdjustScheduling(oldSchedFlags);
                    }

                    // Boost low-priority threads so termination is handled promptly.
                    if (BasePriority >= 0x10)
                    {
                        SetPriority(0xF);
                    }

                    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
                    {
                        // TODO: GIC distributor stuffs (sgir changes ect)
                    }

                    // Abort any in-progress synchronization wait with ThreadTerminating.
                    SignaledObj = null;
                    ObjSyncResult = KernelResult.ThreadTerminating;

                    ReleaseAndResume();
                }
            }

            result = SchedFlags;

            KernelContext.CriticalSection.Leave();

            return result & ThreadSchedState.LowMask;
        }
  255. public void Terminate()
  256. {
  257. ThreadSchedState state = PrepareForTermination();
  258. if (state != ThreadSchedState.TerminationPending)
  259. {
  260. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  261. }
  262. }
        /// <summary>
        /// Runs after every syscall: exits the thread if termination was requested,
        /// otherwise applies any pending force-pause before returning to the guest.
        /// </summary>
        public void HandlePostSyscall()
        {
            ThreadSchedState state;

            do
            {
                if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
                {
                    KernelContext.Scheduler.ExitThread(this);
                    Exit();

                    // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
                    break;
                }

                KernelContext.CriticalSection.Enter();

                // Re-check under the critical section: termination may have been
                // requested between the check above and taking the lock.
                if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
                {
                    state = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        CombineForcePauseFlags();
                    }

                    state = ThreadSchedState.Running;
                }

                KernelContext.CriticalSection.Leave();
            } while (state == ThreadSchedState.TerminationPending);
        }
        /// <summary>
        /// Marks the thread terminated and signals it so waiters (e.g. Terminate) are
        /// woken. Uses a nested critical-section Enter/Leave, which this file relies
        /// on being reentrant (Exit() already holds the section when calling this).
        /// </summary>
        private void ExitImpl()
        {
            KernelContext.CriticalSection.Enter();

            SetNewSchedFlags(ThreadSchedState.TerminationPending);

            _hasExited = true;

            // Wake anyone waiting on this thread as a synchronization object.
            Signal();

            KernelContext.CriticalSection.Leave();
        }
  299. public KernelResult Sleep(long timeout)
  300. {
  301. KernelContext.CriticalSection.Enter();
  302. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  303. {
  304. KernelContext.CriticalSection.Leave();
  305. return KernelResult.ThreadTerminating;
  306. }
  307. SetNewSchedFlags(ThreadSchedState.Paused);
  308. if (timeout > 0)
  309. {
  310. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  311. }
  312. KernelContext.CriticalSection.Leave();
  313. if (timeout > 0)
  314. {
  315. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  316. }
  317. return 0;
  318. }
        /// <summary>
        /// Yields the core: moves this thread to the back of its priority queue on the
        /// current core and requests a reschedule.
        /// </summary>
        public void Yield()
        {
            KernelContext.CriticalSection.Enter();

            if (SchedFlags != ThreadSchedState.Running)
            {
                // Not running anymore; just let the scheduler pick the next thread.
                KernelContext.CriticalSection.Leave();

                KernelContext.Scheduler.ContextSwitch();

                return;
            }

            if (DynamicPriority < KScheduler.PrioritiesCount)
            {
                // Move current thread to the end of the queue.
                _schedulingData.Reschedule(DynamicPriority, CurrentCore, this);
            }

            _scheduler.ThreadReselectionRequested = true;

            KernelContext.CriticalSection.Leave();

            KernelContext.Scheduler.ContextSwitch();
        }
  337. public void YieldWithLoadBalancing()
  338. {
  339. KernelContext.CriticalSection.Enter();
  340. if (SchedFlags != ThreadSchedState.Running)
  341. {
  342. KernelContext.CriticalSection.Leave();
  343. KernelContext.Scheduler.ContextSwitch();
  344. return;
  345. }
  346. int prio = DynamicPriority;
  347. int core = CurrentCore;
  348. KThread nextThreadOnCurrentQueue = null;
  349. if (DynamicPriority < KScheduler.PrioritiesCount)
  350. {
  351. // Move current thread to the end of the queue.
  352. _schedulingData.Reschedule(prio, core, this);
  353. Func<KThread, bool> predicate = x => x.DynamicPriority == prio;
  354. nextThreadOnCurrentQueue = _schedulingData.ScheduledThreads(core).FirstOrDefault(predicate);
  355. }
  356. IEnumerable<KThread> SuitableCandidates()
  357. {
  358. foreach (KThread thread in _schedulingData.SuggestedThreads(core))
  359. {
  360. int srcCore = thread.CurrentCore;
  361. if (srcCore >= 0)
  362. {
  363. KThread selectedSrcCore = _scheduler.CoreContexts[srcCore].SelectedThread;
  364. if (selectedSrcCore == thread || ((selectedSrcCore?.DynamicPriority ?? 2) < 2))
  365. {
  366. continue;
  367. }
  368. }
  369. // If the candidate was scheduled after the current thread, then it's not worth it,
  370. // unless the priority is higher than the current one.
  371. if (nextThreadOnCurrentQueue.LastScheduledTime >= thread.LastScheduledTime ||
  372. nextThreadOnCurrentQueue.DynamicPriority < thread.DynamicPriority)
  373. {
  374. yield return thread;
  375. }
  376. }
  377. }
  378. KThread dst = SuitableCandidates().FirstOrDefault(x => x.DynamicPriority <= prio);
  379. if (dst != null)
  380. {
  381. _schedulingData.TransferToCore(dst.DynamicPriority, core, dst);
  382. _scheduler.ThreadReselectionRequested = true;
  383. }
  384. if (this != nextThreadOnCurrentQueue)
  385. {
  386. _scheduler.ThreadReselectionRequested = true;
  387. }
  388. KernelContext.CriticalSection.Leave();
  389. KernelContext.Scheduler.ContextSwitch();
  390. }
        /// <summary>
        /// Yields the core, removing this thread from the scheduled queues entirely
        /// (core -1), and if the core would go idle tries to migrate one suggested
        /// thread here before rescheduling.
        /// </summary>
        public void YieldAndWaitForLoadBalancing()
        {
            KernelContext.CriticalSection.Enter();

            if (SchedFlags != ThreadSchedState.Running)
            {
                KernelContext.CriticalSection.Leave();

                KernelContext.Scheduler.ContextSwitch();

                return;
            }

            int core = CurrentCore;

            // Park this thread: core -1 means "not scheduled on any core".
            _schedulingData.TransferToCore(DynamicPriority, -1, this);

            KThread selectedThread = null;

            if (!_schedulingData.ScheduledThreads(core).Any())
            {
                // The core is now idle; try to steal a runnable suggested thread.
                foreach (KThread thread in _schedulingData.SuggestedThreads(core))
                {
                    if (thread.CurrentCore < 0)
                    {
                        continue;
                    }

                    KThread firstCandidate = _schedulingData.ScheduledThreads(thread.CurrentCore).FirstOrDefault();

                    // Don't steal the thread its own core would run next.
                    if (firstCandidate == thread)
                    {
                        continue;
                    }

                    if (firstCandidate == null || firstCandidate.DynamicPriority >= 2)
                    {
                        _schedulingData.TransferToCore(thread.DynamicPriority, core, thread);

                        selectedThread = thread;
                    }

                    // Only the first viable candidate is considered.
                    break;
                }
            }

            if (selectedThread != this)
            {
                _scheduler.ThreadReselectionRequested = true;
            }

            KernelContext.CriticalSection.Leave();

            KernelContext.Scheduler.ContextSwitch();
        }
        /// <summary>
        /// Sets the base priority and recomputes the effective (dynamic) priority,
        /// taking priority inheritance from mutex waiters into account.
        /// </summary>
        public void SetPriority(int priority)
        {
            KernelContext.CriticalSection.Enter();

            BasePriority = priority;

            UpdatePriorityInheritance();

            KernelContext.CriticalSection.Leave();
        }
        /// <summary>
        /// Pauses or unpauses the thread through the thread-pause force flag.
        /// </summary>
        /// <param name="pause">True to pause, false to resume.</param>
        /// <returns>
        /// Success, or InvalidState when the thread is not in a pausable/resumable
        /// state or the pause flag is already in the requested state.
        /// </returns>
        public KernelResult SetActivity(bool pause)
        {
            KernelResult result = KernelResult.Success;

            KernelContext.CriticalSection.Enter();

            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

            // Only threads that are paused or running can change activity.
            if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
            {
                KernelContext.CriticalSection.Leave();

                return KernelResult.InvalidState;
            }

            // Nested Enter: the critical section is entered a second time here and
            // balanced by the two Leave calls at the end (reentrant usage).
            KernelContext.CriticalSection.Enter();

            if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
            {
                if (pause)
                {
                    // Pause, the force pause flag should be clear (thread is NOT paused).
                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                    {
                        _forcePauseFlags |= ThreadSchedState.ThreadPauseFlag;

                        CombineForcePauseFlags();
                    }
                    else
                    {
                        result = KernelResult.InvalidState;
                    }
                }
                else
                {
                    // Unpause, the force pause flag should be set (thread is paused).
                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                    {
                        ThreadSchedState oldForcePauseFlags = _forcePauseFlags;

                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                        // Only restore the scheduling state when no other pause source
                        // (process pause, kernel init pause, ...) remains.
                        if ((oldForcePauseFlags & ~ThreadSchedState.ThreadPauseFlag) == ThreadSchedState.None)
                        {
                            ThreadSchedState oldSchedFlags = SchedFlags;

                            SchedFlags &= ThreadSchedState.LowMask;

                            AdjustScheduling(oldSchedFlags);
                        }
                    }
                    else
                    {
                        result = KernelResult.InvalidState;
                    }
                }
            }

            KernelContext.CriticalSection.Leave();
            KernelContext.CriticalSection.Leave();

            return result;
        }
        /// <summary>
        /// Cancels an in-progress synchronization wait: a thread actually blocked in a
        /// sync wait is woken with the Cancelled result; otherwise the cancellation is
        /// recorded in SyncCancelled for the next wait to observe.
        /// </summary>
        public void CancelSynchronization()
        {
            KernelContext.CriticalSection.Enter();

            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
            {
                // Not blocked in a sync wait right now; flag it for later.
                SyncCancelled = true;
            }
            else if (Withholder != null)
            {
                // Withheld thread: detach from the withholder list and resume; the
                // pending cancellation flag stays set.
                Withholder.Remove(WithholderNode);

                SetNewSchedFlags(ThreadSchedState.Running);

                Withholder = null;

                SyncCancelled = true;
            }
            else
            {
                // Blocked in the wait itself: fail the wait with Cancelled and resume.
                SignaledObj = null;

                ObjSyncResult = KernelResult.Cancelled;

                SetNewSchedFlags(ThreadSchedState.Running);

                SyncCancelled = false;
            }

            KernelContext.CriticalSection.Leave();
        }
        /// <summary>
        /// Updates the preferred core and affinity mask. While an affinity override is
        /// active only the override values are changed; otherwise the thread may be
        /// migrated when its current core is no longer allowed by the new mask.
        /// </summary>
        /// <param name="newCore">New preferred core; -3 means "keep the current one".</param>
        /// <param name="newAffinityMask">Bitmask of allowed cores.</param>
        /// <returns>Success, or InvalidCombination when the kept core is not in the mask.</returns>
        public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
        {
            KernelContext.CriticalSection.Enter();

            bool useOverride = _affinityOverrideCount != 0;

            // The value -3 is "do not change the preferred core".
            if (newCore == -3)
            {
                newCore = useOverride ? _preferredCoreOverride : PreferredCore;

                // NOTE(review): 32-bit shift (1 << newCore) tested against a 64-bit
                // mask, while the rest of this class uses 1L shifts; verify the valid
                // range of newCore here (a negative preferred core would behave oddly).
                if ((newAffinityMask & (1 << newCore)) == 0)
                {
                    KernelContext.CriticalSection.Leave();

                    return KernelResult.InvalidCombination;
                }
            }

            if (useOverride)
            {
                _preferredCoreOverride = newCore;
                _affinityMaskOverride = newAffinityMask;
            }
            else
            {
                long oldAffinityMask = AffinityMask;

                PreferredCore = newCore;
                AffinityMask = newAffinityMask;

                if (oldAffinityMask != newAffinityMask)
                {
                    int oldCore = CurrentCore;

                    // Migrate when the current core was removed from the mask.
                    if (CurrentCore >= 0 && ((AffinityMask >> CurrentCore) & 1) == 0)
                    {
                        if (PreferredCore < 0)
                        {
                            CurrentCore = HighestSetCore(AffinityMask);
                        }
                        else
                        {
                            CurrentCore = PreferredCore;
                        }
                    }

                    AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
                }
            }

            KernelContext.CriticalSection.Leave();

            return KernelResult.Success;
        }
  555. private static int HighestSetCore(long mask)
  556. {
  557. for (int core = KScheduler.CpuCoresCount - 1; core >= 0; core--)
  558. {
  559. if (((mask >> core) & 1) != 0)
  560. {
  561. return core;
  562. }
  563. }
  564. return -1;
  565. }
        /// <summary>
        /// Merges the pending force-pause bits into the scheduling flags (keeping the
        /// low state nibble) and updates the scheduler queues accordingly.
        /// </summary>
        private void CombineForcePauseFlags()
        {
            ThreadSchedState oldFlags = SchedFlags;
            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

            SchedFlags = lowNibble | _forcePauseFlags;

            AdjustScheduling(oldFlags);
        }
        /// <summary>
        /// Replaces the low (state) nibble of the scheduling flags while preserving the
        /// force-pause bits, updating the scheduler queues when the state changed.
        /// </summary>
        private void SetNewSchedFlags(ThreadSchedState newFlags)
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState oldFlags = SchedFlags;

            SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;

            // Only touch the queues when the visible state actually changed.
            if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
            {
                AdjustScheduling(oldFlags);
            }

            KernelContext.CriticalSection.Leave();
        }
  584. public void ReleaseAndResume()
  585. {
  586. KernelContext.CriticalSection.Enter();
  587. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
  588. {
  589. if (Withholder != null)
  590. {
  591. Withholder.Remove(WithholderNode);
  592. SetNewSchedFlags(ThreadSchedState.Running);
  593. Withholder = null;
  594. }
  595. else
  596. {
  597. SetNewSchedFlags(ThreadSchedState.Running);
  598. }
  599. }
  600. KernelContext.CriticalSection.Leave();
  601. }
        /// <summary>
        /// Forces the low (state) nibble of the scheduling flags to
        /// <paramref name="newFlags"/> and updates the scheduler queues.
        /// </summary>
        public void Reschedule(ThreadSchedState newFlags)
        {
            KernelContext.CriticalSection.Enter();

            ThreadSchedState oldFlags = SchedFlags;

            SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
                         (newFlags & ThreadSchedState.LowMask);

            AdjustScheduling(oldFlags);

            KernelContext.CriticalSection.Leave();
        }
        /// <summary>
        /// Registers <paramref name="requester"/> as waiting on a mutex held by this
        /// thread and propagates its priority via inheritance.
        /// </summary>
        public void AddMutexWaiter(KThread requester)
        {
            AddToMutexWaitersList(requester);

            requester.MutexOwner = this;

            UpdatePriorityInheritance();
        }
        /// <summary>
        /// Removes <paramref name="thread"/> from this thread's mutex waiter list (if
        /// it is still linked) and recomputes the inherited priority.
        /// </summary>
        public void RemoveMutexWaiter(KThread thread)
        {
            // The node may already be detached (e.g. after ownership transfer).
            if (thread._mutexWaiterNode?.List != null)
            {
                _mutexWaiters.Remove(thread._mutexWaiterNode);
            }

            thread.MutexOwner = null;

            UpdatePriorityInheritance();
        }
        /// <summary>
        /// Releases a mutex held by this thread: the first waiter on
        /// <paramref name="mutexAddress"/> (list is priority-sorted) becomes the new
        /// owner, and remaining waiters for that address move to the new owner's list.
        /// </summary>
        /// <param name="mutexAddress">Guest address of the mutex being released.</param>
        /// <param name="count">Number of threads that were waiting on the mutex.</param>
        /// <returns>The new owner, or null when no thread waits on this address.</returns>
        public KThread RelinquishMutex(ulong mutexAddress, out int count)
        {
            count = 0;

            if (_mutexWaiters.First == null)
            {
                return null;
            }

            KThread newMutexOwner = null;

            LinkedListNode<KThread> currentNode = _mutexWaiters.First;

            do
            {
                // Skip all threads that are not waiting for this mutex.
                while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
                {
                    currentNode = currentNode.Next;
                }

                if (currentNode == null)
                {
                    break;
                }

                // Capture the successor before the node is removed from the list.
                LinkedListNode<KThread> nextNode = currentNode.Next;

                _mutexWaiters.Remove(currentNode);

                currentNode.Value.MutexOwner = newMutexOwner;

                if (newMutexOwner != null)
                {
                    // New owner was already selected, re-insert on new owner list.
                    newMutexOwner.AddToMutexWaitersList(currentNode.Value);
                }
                else
                {
                    // New owner not selected yet, use current thread.
                    newMutexOwner = currentNode.Value;
                }

                count++;

                currentNode = nextNode;
            }
            while (currentNode != null);

            if (newMutexOwner != null)
            {
                // Both waiter lists changed; refresh inherited priorities on each side.
                UpdatePriorityInheritance();

                newMutexOwner.UpdatePriorityInheritance();
            }

            return newMutexOwner;
        }
        /// <summary>
        /// Recomputes the dynamic priority as the best (numerically lowest) of the base
        /// priority and the head waiter's priority, then propagates any change up the
        /// chain of mutex owners.
        /// </summary>
        private void UpdatePriorityInheritance()
        {
            // If any of the threads waiting for the mutex has
            // higher priority than the current thread, then
            // the current thread inherits that priority.
            int highestPriority = BasePriority;

            if (_mutexWaiters.First != null)
            {
                // The waiter list is priority-sorted, so the head is the best waiter.
                int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;

                if (waitingDynamicPriority < highestPriority)
                {
                    highestPriority = waitingDynamicPriority;
                }
            }

            if (highestPriority != DynamicPriority)
            {
                int oldPriority = DynamicPriority;

                DynamicPriority = highestPriority;

                AdjustSchedulingForNewPriority(oldPriority);

                if (MutexOwner != null)
                {
                    // Remove and re-insert to ensure proper sorting based on new priority.
                    MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);

                    MutexOwner.AddToMutexWaitersList(this);

                    // Recurse up the ownership chain.
                    MutexOwner.UpdatePriorityInheritance();
                }
            }
        }
  698. private void AddToMutexWaitersList(KThread thread)
  699. {
  700. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  701. int currentPriority = thread.DynamicPriority;
  702. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  703. {
  704. nextPrio = nextPrio.Next;
  705. }
  706. if (nextPrio != null)
  707. {
  708. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  709. }
  710. else
  711. {
  712. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  713. }
  714. }
        /// <summary>
        /// Reconciles the scheduler queues after SchedFlags changed: a thread entering
        /// Running is inserted as scheduled on its core and suggested on its other
        /// affinity cores; one leaving Running is removed from those queues.
        /// </summary>
        private void AdjustScheduling(ThreadSchedState oldFlags)
        {
            if (oldFlags == SchedFlags)
            {
                return;
            }

            if (oldFlags == ThreadSchedState.Running)
            {
                // Was running, now it's stopped.
                if (CurrentCore >= 0)
                {
                    _schedulingData.Unschedule(DynamicPriority, CurrentCore, this);
                }

                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        _schedulingData.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }
            else if (SchedFlags == ThreadSchedState.Running)
            {
                // Was stopped, now it's running.
                if (CurrentCore >= 0)
                {
                    _schedulingData.Schedule(DynamicPriority, CurrentCore, this);
                }

                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        _schedulingData.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            _scheduler.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Moves a running thread between priority queues after its dynamic priority
        /// changed: the currently executing thread is prepended (keeps its core slot)
        /// while any other thread is appended to the new queue.
        /// </summary>
        private void AdjustSchedulingForNewPriority(int oldPriority)
        {
            if (SchedFlags != ThreadSchedState.Running)
            {
                return;
            }

            // Remove thread from the old priority queues.
            if (CurrentCore >= 0)
            {
                _schedulingData.Unschedule(oldPriority, CurrentCore, this);
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
                {
                    _schedulingData.Unsuggest(oldPriority, core, this);
                }
            }

            // Add thread to the new priority queues.
            KThread currentThread = _scheduler.GetCurrentThread();

            if (CurrentCore >= 0)
            {
                if (currentThread == this)
                {
                    _schedulingData.SchedulePrepend(DynamicPriority, CurrentCore, this);
                }
                else
                {
                    _schedulingData.Schedule(DynamicPriority, CurrentCore, this);
                }
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
                {
                    _schedulingData.Suggest(DynamicPriority, core, this);
                }
            }

            _scheduler.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Moves a running thread between per-core queues after its affinity mask (and
        /// possibly current core) changed: scheduled on its own core, suggested on the
        /// other allowed cores.
        /// </summary>
        private void AdjustSchedulingForNewAffinity(long oldAffinityMask, int oldCore)
        {
            if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount)
            {
                return;
            }

            // Remove thread from the old priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((oldAffinityMask >> core) & 1) != 0)
                {
                    if (core == oldCore)
                    {
                        _schedulingData.Unschedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        _schedulingData.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }

            // Add thread to the new priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((AffinityMask >> core) & 1) != 0)
                {
                    if (core == CurrentCore)
                    {
                        _schedulingData.Schedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        _schedulingData.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            _scheduler.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Writes the guest entry arguments: X0 = argument pointer, X1 = the thread's
        /// own handle.
        /// </summary>
        public void SetEntryArguments(long argsPtr, int threadHandle)
        {
            Context.SetX(0, (ulong)argsPtr);
            Context.SetX(1, (ulong)threadHandle);
        }
  836. public void TimeUp()
  837. {
  838. ReleaseAndResume();
  839. }
  840. public string GetGuestStackTrace()
  841. {
  842. return Owner.Debugger.GetGuestStackTrace(Context);
  843. }
  844. public void PrintGuestStackTrace()
  845. {
  846. Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
  847. }
        /// <summary>
        /// Starts the backing host thread exactly once; subsequent calls are no-ops
        /// (guarded by an atomic 0 -> 1 exchange on _hostThreadRunning).
        /// </summary>
        public void Execute()
        {
            if (Interlocked.CompareExchange(ref _hostThreadRunning, 1, 0) == 0)
            {
                HostThread.Start();
            }
        }
        /// <summary>
        /// Host-thread entry point: runs guest code at <paramref name="entrypoint"/>
        /// until it finishes, then detaches the thread and disposes its CPU context.
        /// </summary>
        private void ThreadStart(ulong entrypoint)
        {
            Owner.CpuContext.Execute(Context, entrypoint);

            ThreadExit();

            // Only safe to dispose after guest execution has fully returned.
            Context.Dispose();
        }
        /// <summary>
        /// Detaches the thread from the scheduler once guest execution has finished.
        /// </summary>
        private void ThreadExit()
        {
            KernelContext.Scheduler.ExitThread(this);
            KernelContext.Scheduler.RemoveThread(this);
        }
  866. public bool IsCurrentHostThread()
  867. {
  868. return Thread.CurrentThread == HostThread;
  869. }
  870. public override bool IsSignaled()
  871. {
  872. return _hasExited;
  873. }
        /// <summary>
        /// Final cleanup once the last reference is dropped: frees kernel resources
        /// and returns the thread slot to the appropriate resource limit.
        /// </summary>
        protected override void Destroy()
        {
            if (_hasBeenInitialized)
            {
                FreeResources();

                bool released = Owner != null || _hasBeenReleased;

                if (Owner != null)
                {
                    // When Exit() already released the slot, only the count is dropped here.
                    Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);

                    Owner.DecrementReferenceCount();
                }
                else
                {
                    KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                }
            }
        }
        /// <summary>
        /// Releases per-thread kernel state: process membership, TLS, and any mutex
        /// waiters still parked on this thread.
        /// </summary>
        private void FreeResources()
        {
            Owner?.RemoveThread(this);

            // NOTE(review): a non-zero _tlsAddress implies Owner was non-null when TLS
            // was allocated in Initialize, so the unguarded Owner dereference should
            // be safe here — confirm that invariant holds.
            if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
            {
                throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
            }

            KernelContext.CriticalSection.Enter();

            // Wake up all threads that may be waiting for a mutex being held by this thread.
            foreach (KThread thread in _mutexWaiters)
            {
                thread.MutexOwner = null;

                thread._preferredCoreOverride = 0;

                // Their wait fails with InvalidState since the owner is going away.
                thread.ObjSyncResult = KernelResult.InvalidState;

                thread.ReleaseAndResume();
            }

            KernelContext.CriticalSection.Leave();

            Owner?.DecrementThreadCountAndTerminateIfZero();
        }
  910. }
  911. }