// KThread.cs
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Linq;
  8. using System.Text;
  9. using System.Threading;
  10. namespace Ryujinx.HLE.HOS.Kernel.Threading
  11. {
    // Kernel thread control block. Tracks the guest thread's scheduling state,
    // synchronization objects, mutex/priority-inheritance bookkeeping, and the
    // host (emulator) thread that actually runs the guest code.
    class KThread : KSynchronizationObject, IKFutureSchedulerObject
    {
        public const int MaxWaitSyncObjects = 64;

        // Set to 1 (via Interlocked) once the host thread has been started; see Execute().
        private int _hostThreadRunning;
        public Thread HostThread { get; private set; }
        public ARMeilleure.State.ExecutionContext Context { get; private set; }

        // Bitmask of CPU cores this thread is allowed to run on.
        public long AffinityMask { get; set; }
        public long ThreadUid { get; private set; }
        public long TotalTimeRunning { get; set; }

        // Object that satisfied the last wait, and the condition variable / mutex
        // addresses used by the userland synchronization SVCs.
        public KSynchronizationObject SignaledObj { get; set; }
        public ulong CondVarAddress { get; set; }
        private ulong _entrypoint;
        public ulong MutexAddress { get; set; }
        public KProcess Owner { get; private set; }

        // Guest virtual and DRAM addresses of this thread's TLS block.
        private ulong _tlsAddress;
        public ulong TlsAddress => _tlsAddress;
        public ulong TlsDramAddress { get; private set; }

        // Objects/handles currently being waited on (svcWaitSynchronization).
        public KSynchronizationObject[] WaitSyncObjects { get; }
        public int[] WaitSyncHandles { get; }
        public long LastScheduledTime { get; set; }
        public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

        // List (and node) used when the thread is parked waiting with a timeout.
        public LinkedList<KThread> Withholder { get; set; }
        public LinkedListNode<KThread> WithholderNode { get; set; }
        public LinkedListNode<KThread> ProcessListNode { get; set; }

        // Threads waiting on a mutex held by this thread, kept sorted by priority;
        // _mutexWaiterNode is this thread's node inside its mutex owner's list.
        private LinkedList<KThread> _mutexWaiters;
        private LinkedListNode<KThread> _mutexWaiterNode;
        public KThread MutexOwner { get; private set; }
        public int ThreadHandleForUserMutex { get; set; }

        // High-nibble pause reasons (thread/process/debug pause); combined into
        // SchedFlags by CombineForcePauseFlags().
        private ThreadSchedState _forcePauseFlags;
        public KernelResult ObjSyncResult { get; set; }

        // DynamicPriority may be boosted below BasePriority by priority inheritance.
        public int DynamicPriority { get; set; }
        public int CurrentCore { get; set; }
        public int BasePriority { get; set; }
        public int PreferredCore { get; set; }

        // Affinity/core override used by svcSetThreadCoreMask while an override is active.
        private long _affinityMaskOverride;
        private int _preferredCoreOverride;
#pragma warning disable CS0649
        private int _affinityOverrideCount;
#pragma warning restore CS0649
        public ThreadSchedState SchedFlags { get; private set; }

        // Backing int so termination can be requested atomically (see PrepareForTermination).
        private int _shallBeTerminated;
        public bool ShallBeTerminated { get => _shallBeTerminated != 0; set => _shallBeTerminated = value ? 1 : 0; }
        public bool SyncCancelled { get; set; }
        public bool WaitingSync { get; set; }
        private bool _hasExited;
        private bool _hasBeenInitialized;
        private bool _hasBeenReleased;
        public bool WaitingInArbitration { get; set; }
        private KScheduler _scheduler;
        private KSchedulingData _schedulingData;
        public long LastPc { get; set; }
  63. public KThread(KernelContext context) : base(context)
  64. {
  65. _scheduler = KernelContext.Scheduler;
  66. _schedulingData = KernelContext.Scheduler.SchedulingData;
  67. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  68. WaitSyncHandles = new int[MaxWaitSyncObjects];
  69. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  70. _mutexWaiters = new LinkedList<KThread>();
  71. }
  72. public KernelResult Initialize(
  73. ulong entrypoint,
  74. ulong argsPtr,
  75. ulong stackTop,
  76. int priority,
  77. int defaultCpuCore,
  78. KProcess owner,
  79. ThreadType type = ThreadType.User,
  80. ThreadStart customHostThreadStart = null)
  81. {
  82. if ((uint)type > 3)
  83. {
  84. throw new ArgumentException($"Invalid thread type \"{type}\".");
  85. }
  86. PreferredCore = defaultCpuCore;
  87. AffinityMask |= 1L << defaultCpuCore;
  88. SchedFlags = type == ThreadType.Dummy
  89. ? ThreadSchedState.Running
  90. : ThreadSchedState.None;
  91. CurrentCore = PreferredCore;
  92. DynamicPriority = priority;
  93. BasePriority = priority;
  94. ObjSyncResult = KernelResult.ThreadNotStarted;
  95. _entrypoint = entrypoint;
  96. if (type == ThreadType.User)
  97. {
  98. if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
  99. {
  100. return KernelResult.OutOfMemory;
  101. }
  102. TlsDramAddress = owner.MemoryManager.GetDramAddressFromVa(_tlsAddress);
  103. MemoryHelper.FillWithZeros(owner.CpuMemory, (long)_tlsAddress, KTlsPageInfo.TlsEntrySize);
  104. }
  105. bool is64Bits;
  106. if (owner != null)
  107. {
  108. Owner = owner;
  109. owner.IncrementReferenceCount();
  110. owner.IncrementThreadCount();
  111. is64Bits = (owner.MmuFlags & 1) != 0;
  112. }
  113. else
  114. {
  115. is64Bits = true;
  116. }
  117. HostThread = new Thread(customHostThreadStart ?? (() => ThreadStart(entrypoint)));
  118. Context = CpuContext.CreateExecutionContext();
  119. bool isAarch32 = (Owner.MmuFlags & 1) == 0;
  120. Context.IsAarch32 = isAarch32;
  121. Context.SetX(0, argsPtr);
  122. if (isAarch32)
  123. {
  124. Context.SetX(13, (uint)stackTop);
  125. }
  126. else
  127. {
  128. Context.SetX(31, stackTop);
  129. }
  130. Context.CntfrqEl0 = 19200000;
  131. Context.Tpidr = (long)_tlsAddress;
  132. owner.SubscribeThreadEventHandlers(Context);
  133. ThreadUid = KernelContext.NewThreadUid();
  134. HostThread.Name = $"HLE.HostThread.{ThreadUid}";
  135. _hasBeenInitialized = true;
  136. if (owner != null)
  137. {
  138. owner.AddThread(this);
  139. if (owner.IsPaused)
  140. {
  141. KernelContext.CriticalSection.Enter();
  142. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  143. {
  144. KernelContext.CriticalSection.Leave();
  145. return KernelResult.Success;
  146. }
  147. _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
  148. CombineForcePauseFlags();
  149. KernelContext.CriticalSection.Leave();
  150. }
  151. }
  152. return KernelResult.Success;
  153. }
        /// <summary>
        /// Starts the thread (svcStartThread). May spin while the calling thread
        /// itself has pending force-pause flags, re-acquiring the critical section
        /// between attempts so pauses can take effect.
        /// </summary>
        /// <returns>
        /// Success if the thread was transitioned to Running; InvalidState if it was
        /// already started; ThreadTerminating if a termination request was observed.
        /// </returns>
        public KernelResult Start()
        {
            if (!KernelContext.KernelInitialized)
            {
                // Before kernel init completes, new threads start paused.
                KernelContext.CriticalSection.Enter();
                if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
                {
                    _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;
                    CombineForcePauseFlags();
                }
                KernelContext.CriticalSection.Leave();
            }
            KernelResult result = KernelResult.ThreadTerminating;
            KernelContext.CriticalSection.Enter();
            if (!ShallBeTerminated)
            {
                KThread currentThread = KernelContext.Scheduler.GetCurrentThread();
                while (SchedFlags != ThreadSchedState.TerminationPending &&
                       currentThread.SchedFlags != ThreadSchedState.TerminationPending &&
                       !currentThread.ShallBeTerminated)
                {
                    // Already started (low nibble non-zero)?
                    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
                    {
                        result = KernelResult.InvalidState;
                        break;
                    }
                    if (currentThread._forcePauseFlags == ThreadSchedState.None)
                    {
                        // Caller is not being paused: apply any pending pause to the
                        // new thread and make it runnable.
                        if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                        {
                            CombineForcePauseFlags();
                        }
                        SetNewSchedFlags(ThreadSchedState.Running);
                        result = KernelResult.Success;
                        break;
                    }
                    else
                    {
                        // Caller must pause first; drop and retake the critical section
                        // so the pause (and a possible context switch) can happen.
                        currentThread.CombineForcePauseFlags();
                        KernelContext.CriticalSection.Leave();
                        KernelContext.CriticalSection.Enter();
                        if (currentThread.ShallBeTerminated)
                        {
                            break;
                        }
                    }
                }
            }
            KernelContext.CriticalSection.Leave();
            return result;
        }
        /// <summary>
        /// Exits the thread: releases its resource-limit slot, clears force-pause
        /// flags, signals termination and drops the self reference.
        /// </summary>
        public void Exit()
        {
            // TODO: Debug event.
            if (Owner != null)
            {
                Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);
                // Remember the release so Destroy() does not double-release.
                _hasBeenReleased = true;
            }
            KernelContext.CriticalSection.Enter();
            // An exiting thread can no longer be force-paused.
            _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
            ExitImpl();
            KernelContext.CriticalSection.Leave();
            DecrementReferenceCount();
        }
        /// <summary>
        /// Requests thread termination (first caller wins via atomic CAS on the
        /// termination flag) and nudges the thread out of any wait or pause so it
        /// can observe the request.
        /// </summary>
        /// <returns>The low (scheduling state) nibble of SchedFlags after the request.</returns>
        public ThreadSchedState PrepareForTermination()
        {
            KernelContext.CriticalSection.Enter();
            ThreadSchedState result;
            if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
            {
                if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
                {
                    // Never started: it can go straight to TerminationPending.
                    SchedFlags = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        // Drop the pause so the thread can run and terminate itself.
                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;
                        ThreadSchedState oldSchedFlags = SchedFlags;
                        SchedFlags &= ThreadSchedState.LowMask;
                        AdjustScheduling(oldSchedFlags);
                    }
                    // Boost low-priority threads so termination is not starved.
                    if (BasePriority >= 0x10)
                    {
                        SetPriority(0xF);
                    }
                    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
                    {
                        // TODO: GIC distributor stuffs (sgir changes ect)
                        Context.RequestInterrupt();
                    }
                    // Wake the thread from any synchronization wait with a
                    // ThreadTerminating result.
                    SignaledObj = null;
                    ObjSyncResult = KernelResult.ThreadTerminating;
                    ReleaseAndResume();
                }
            }
            result = SchedFlags;
            KernelContext.CriticalSection.Leave();
            return result & ThreadSchedState.LowMask;
        }
  256. public void Terminate()
  257. {
  258. ThreadSchedState state = PrepareForTermination();
  259. if (state != ThreadSchedState.TerminationPending)
  260. {
  261. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  262. }
  263. }
        /// <summary>
        /// Runs after every syscall: exits the thread if termination was requested,
        /// otherwise applies any pending force-pause flags. Loops until the thread
        /// is either running or has exited.
        /// </summary>
        public void HandlePostSyscall()
        {
            ThreadSchedState state;
            do
            {
                if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
                {
                    KernelContext.Scheduler.ExitThread(this);
                    Exit();
                    // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
                    break;
                }
                KernelContext.CriticalSection.Enter();
                if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
                {
                    state = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        // Applying the pause may suspend us when the critical
                        // section is left; re-check termination on resume.
                        CombineForcePauseFlags();
                    }
                    state = ThreadSchedState.Running;
                }
                KernelContext.CriticalSection.Leave();
            } while (state == ThreadSchedState.TerminationPending);
        }
        /// <summary>
        /// Marks the thread as terminating and signals waiters; the exit flag is
        /// set before Signal() so IsSignaled() observes it.
        /// </summary>
        private void ExitImpl()
        {
            KernelContext.CriticalSection.Enter();
            SetNewSchedFlags(ThreadSchedState.TerminationPending);
            _hasExited = true;
            Signal();
            KernelContext.CriticalSection.Leave();
        }
  300. public KernelResult Sleep(long timeout)
  301. {
  302. KernelContext.CriticalSection.Enter();
  303. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  304. {
  305. KernelContext.CriticalSection.Leave();
  306. return KernelResult.ThreadTerminating;
  307. }
  308. SetNewSchedFlags(ThreadSchedState.Paused);
  309. if (timeout > 0)
  310. {
  311. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  312. }
  313. KernelContext.CriticalSection.Leave();
  314. if (timeout > 0)
  315. {
  316. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  317. }
  318. return 0;
  319. }
        /// <summary>
        /// Yields to threads of the same priority on the same core
        /// (svcSleepThread with timeout 0).
        /// </summary>
        public void Yield()
        {
            KernelContext.CriticalSection.Enter();
            if (SchedFlags != ThreadSchedState.Running)
            {
                // Not running (e.g. being paused): just let the scheduler act.
                KernelContext.CriticalSection.Leave();
                KernelContext.Scheduler.ContextSwitch();
                return;
            }
            if (DynamicPriority < KScheduler.PrioritiesCount)
            {
                // Move current thread to the end of the queue.
                _schedulingData.Reschedule(DynamicPriority, CurrentCore, this);
            }
            _scheduler.ThreadReselectionRequested = true;
            KernelContext.CriticalSection.Leave();
            KernelContext.Scheduler.ContextSwitch();
        }
  338. public void YieldWithLoadBalancing()
  339. {
  340. KernelContext.CriticalSection.Enter();
  341. if (SchedFlags != ThreadSchedState.Running)
  342. {
  343. KernelContext.CriticalSection.Leave();
  344. KernelContext.Scheduler.ContextSwitch();
  345. return;
  346. }
  347. int prio = DynamicPriority;
  348. int core = CurrentCore;
  349. KThread nextThreadOnCurrentQueue = null;
  350. if (DynamicPriority < KScheduler.PrioritiesCount)
  351. {
  352. // Move current thread to the end of the queue.
  353. _schedulingData.Reschedule(prio, core, this);
  354. Func<KThread, bool> predicate = x => x.DynamicPriority == prio;
  355. nextThreadOnCurrentQueue = _schedulingData.ScheduledThreads(core).FirstOrDefault(predicate);
  356. }
  357. IEnumerable<KThread> SuitableCandidates()
  358. {
  359. foreach (KThread thread in _schedulingData.SuggestedThreads(core))
  360. {
  361. int srcCore = thread.CurrentCore;
  362. if (srcCore >= 0)
  363. {
  364. KThread selectedSrcCore = _scheduler.CoreContexts[srcCore].SelectedThread;
  365. if (selectedSrcCore == thread || ((selectedSrcCore?.DynamicPriority ?? 2) < 2))
  366. {
  367. continue;
  368. }
  369. }
  370. // If the candidate was scheduled after the current thread, then it's not worth it,
  371. // unless the priority is higher than the current one.
  372. if (nextThreadOnCurrentQueue.LastScheduledTime >= thread.LastScheduledTime ||
  373. nextThreadOnCurrentQueue.DynamicPriority < thread.DynamicPriority)
  374. {
  375. yield return thread;
  376. }
  377. }
  378. }
  379. KThread dst = SuitableCandidates().FirstOrDefault(x => x.DynamicPriority <= prio);
  380. if (dst != null)
  381. {
  382. _schedulingData.TransferToCore(dst.DynamicPriority, core, dst);
  383. _scheduler.ThreadReselectionRequested = true;
  384. }
  385. if (this != nextThreadOnCurrentQueue)
  386. {
  387. _scheduler.ThreadReselectionRequested = true;
  388. }
  389. KernelContext.CriticalSection.Leave();
  390. KernelContext.Scheduler.ContextSwitch();
  391. }
        /// <summary>
        /// Yields, removes itself from its core (svcSleepThread with timeout -2),
        /// and if the core becomes empty, pulls in a suggested thread from another
        /// core when that is beneficial.
        /// </summary>
        public void YieldAndWaitForLoadBalancing()
        {
            KernelContext.CriticalSection.Enter();
            if (SchedFlags != ThreadSchedState.Running)
            {
                KernelContext.CriticalSection.Leave();
                KernelContext.Scheduler.ContextSwitch();
                return;
            }
            int core = CurrentCore;
            // Detach from any core; the scheduler (or the migration below) will
            // place this thread again.
            _schedulingData.TransferToCore(DynamicPriority, -1, this);
            KThread selectedThread = null;
            if (!_schedulingData.ScheduledThreads(core).Any())
            {
                // This core is now idle; try to steal one suggested thread.
                foreach (KThread thread in _schedulingData.SuggestedThreads(core))
                {
                    if (thread.CurrentCore < 0)
                    {
                        continue;
                    }
                    KThread firstCandidate = _schedulingData.ScheduledThreads(thread.CurrentCore).FirstOrDefault();
                    if (firstCandidate == thread)
                    {
                        // It's the next thread its own core would run; leave it.
                        continue;
                    }
                    if (firstCandidate == null || firstCandidate.DynamicPriority >= 2)
                    {
                        _schedulingData.TransferToCore(thread.DynamicPriority, core, thread);
                        selectedThread = thread;
                    }
                    // Only the first viable suggestion is considered.
                    break;
                }
            }
            if (selectedThread != this)
            {
                _scheduler.ThreadReselectionRequested = true;
            }
            KernelContext.CriticalSection.Leave();
            KernelContext.Scheduler.ContextSwitch();
        }
        /// <summary>
        /// Sets the base priority; the effective (dynamic) priority is then
        /// recomputed through the priority-inheritance logic.
        /// </summary>
        public void SetPriority(int priority)
        {
            KernelContext.CriticalSection.Enter();
            BasePriority = priority;
            UpdatePriorityInheritance();
            KernelContext.CriticalSection.Leave();
        }
        /// <summary>
        /// Pauses or unpauses the thread (svcSetThreadActivity) by toggling the
        /// thread-pause force flag. Note the critical section is deliberately
        /// entered twice (it is reentrant), mirroring the HOS kernel.
        /// </summary>
        /// <param name="pause">True to pause, false to resume.</param>
        /// <returns>Success, or InvalidState if the thread is not in a pausable
        /// state or the requested transition is redundant.</returns>
        public KernelResult SetActivity(bool pause)
        {
            KernelResult result = KernelResult.Success;
            KernelContext.CriticalSection.Enter();
            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
            if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
            {
                KernelContext.CriticalSection.Leave();
                return KernelResult.InvalidState;
            }
            KernelContext.CriticalSection.Enter();
            if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
            {
                if (pause)
                {
                    // Pause, the force pause flag should be clear (thread is NOT paused).
                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                    {
                        _forcePauseFlags |= ThreadSchedState.ThreadPauseFlag;
                        CombineForcePauseFlags();
                    }
                    else
                    {
                        result = KernelResult.InvalidState;
                    }
                }
                else
                {
                    // Unpause, the force pause flag should be set (thread is paused).
                    if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                    {
                        ThreadSchedState oldForcePauseFlags = _forcePauseFlags;
                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;
                        // Only resume if no other pause reason remains.
                        if ((oldForcePauseFlags & ~ThreadSchedState.ThreadPauseFlag) == ThreadSchedState.None)
                        {
                            ThreadSchedState oldSchedFlags = SchedFlags;
                            SchedFlags &= ThreadSchedState.LowMask;
                            AdjustScheduling(oldSchedFlags);
                        }
                    }
                    else
                    {
                        result = KernelResult.InvalidState;
                    }
                }
            }
            // Two leaves balance the two enters above.
            KernelContext.CriticalSection.Leave();
            KernelContext.CriticalSection.Leave();
            return result;
        }
        /// <summary>
        /// Cancels an in-progress synchronization wait (svcCancelSynchronization).
        /// If the thread is not currently paused in a sync wait, the cancellation
        /// is recorded for the next wait instead.
        /// </summary>
        public void CancelSynchronization()
        {
            KernelContext.CriticalSection.Enter();
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
            {
                // Not waiting right now: flag the cancellation for later.
                SyncCancelled = true;
            }
            else if (Withholder != null)
            {
                // Waiting with a timeout: remove from the withholder list and resume.
                Withholder.Remove(WithholderNode);
                SetNewSchedFlags(ThreadSchedState.Running);
                Withholder = null;
                SyncCancelled = true;
            }
            else
            {
                // Actively waiting: wake it with a Cancelled result.
                SignaledObj = null;
                ObjSyncResult = KernelResult.Cancelled;
                SetNewSchedFlags(ThreadSchedState.Running);
                SyncCancelled = false;
            }
            KernelContext.CriticalSection.Leave();
        }
  512. public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
  513. {
  514. KernelContext.CriticalSection.Enter();
  515. bool useOverride = _affinityOverrideCount != 0;
  516. // The value -3 is "do not change the preferred core".
  517. if (newCore == -3)
  518. {
  519. newCore = useOverride ? _preferredCoreOverride : PreferredCore;
  520. if ((newAffinityMask & (1 << newCore)) == 0)
  521. {
  522. KernelContext.CriticalSection.Leave();
  523. return KernelResult.InvalidCombination;
  524. }
  525. }
  526. if (useOverride)
  527. {
  528. _preferredCoreOverride = newCore;
  529. _affinityMaskOverride = newAffinityMask;
  530. }
  531. else
  532. {
  533. long oldAffinityMask = AffinityMask;
  534. PreferredCore = newCore;
  535. AffinityMask = newAffinityMask;
  536. if (oldAffinityMask != newAffinityMask)
  537. {
  538. int oldCore = CurrentCore;
  539. if (CurrentCore >= 0 && ((AffinityMask >> CurrentCore) & 1) == 0)
  540. {
  541. if (PreferredCore < 0)
  542. {
  543. CurrentCore = HighestSetCore(AffinityMask);
  544. }
  545. else
  546. {
  547. CurrentCore = PreferredCore;
  548. }
  549. }
  550. AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
  551. }
  552. }
  553. KernelContext.CriticalSection.Leave();
  554. return KernelResult.Success;
  555. }
  556. private static int HighestSetCore(long mask)
  557. {
  558. for (int core = KScheduler.CpuCoresCount - 1; core >= 0; core--)
  559. {
  560. if (((mask >> core) & 1) != 0)
  561. {
  562. return core;
  563. }
  564. }
  565. return -1;
  566. }
        /// <summary>
        /// Merges the pending force-pause flags into the high nibble of SchedFlags
        /// and updates the scheduling queues accordingly.
        /// </summary>
        private void CombineForcePauseFlags()
        {
            ThreadSchedState oldFlags = SchedFlags;
            ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
            SchedFlags = lowNibble | _forcePauseFlags;
            AdjustScheduling(oldFlags);
        }
        /// <summary>
        /// Replaces the low (scheduling state) nibble of SchedFlags, preserving
        /// the force-pause bits, and reschedules only if the state changed.
        /// </summary>
        private void SetNewSchedFlags(ThreadSchedState newFlags)
        {
            KernelContext.CriticalSection.Enter();
            ThreadSchedState oldFlags = SchedFlags;
            SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;
            if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
            {
                AdjustScheduling(oldFlags);
            }
            KernelContext.CriticalSection.Leave();
        }
        /// <summary>
        /// Wakes the thread from a Paused state, detaching it from any withholder
        /// (timeout) list first.
        /// </summary>
        public void ReleaseAndResume()
        {
            KernelContext.CriticalSection.Enter();
            if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
            {
                if (Withholder != null)
                {
                    Withholder.Remove(WithholderNode);
                    SetNewSchedFlags(ThreadSchedState.Running);
                    Withholder = null;
                }
                else
                {
                    SetNewSchedFlags(ThreadSchedState.Running);
                }
            }
            KernelContext.CriticalSection.Leave();
        }
  603. public void Reschedule(ThreadSchedState newFlags)
  604. {
  605. KernelContext.CriticalSection.Enter();
  606. ThreadSchedState oldFlags = SchedFlags;
  607. SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
  608. (newFlags & ThreadSchedState.LowMask);
  609. AdjustScheduling(oldFlags);
  610. KernelContext.CriticalSection.Leave();
  611. }
        /// <summary>
        /// Registers a thread as waiting on a mutex held by this thread and
        /// propagates its priority through inheritance.
        /// </summary>
        public void AddMutexWaiter(KThread requester)
        {
            AddToMutexWaitersList(requester);
            requester.MutexOwner = this;
            UpdatePriorityInheritance();
        }
        /// <summary>
        /// Removes a thread from this thread's mutex-waiter list (if it is linked)
        /// and recomputes the inherited priority.
        /// </summary>
        public void RemoveMutexWaiter(KThread thread)
        {
            // The node may already be detached (e.g. ownership transfer).
            if (thread._mutexWaiterNode?.List != null)
            {
                _mutexWaiters.Remove(thread._mutexWaiterNode);
            }
            thread.MutexOwner = null;
            UpdatePriorityInheritance();
        }
        /// <summary>
        /// Releases a userland mutex held by this thread: the highest-priority
        /// waiter on that address becomes the new owner, and the remaining waiters
        /// on the same address are re-linked onto the new owner's waiter list.
        /// </summary>
        /// <param name="mutexAddress">Guest address of the mutex.</param>
        /// <param name="count">Number of threads that were waiting on the mutex.</param>
        /// <returns>The new mutex owner, or null if no thread was waiting.</returns>
        public KThread RelinquishMutex(ulong mutexAddress, out int count)
        {
            count = 0;
            if (_mutexWaiters.First == null)
            {
                return null;
            }
            KThread newMutexOwner = null;
            LinkedListNode<KThread> currentNode = _mutexWaiters.First;
            do
            {
                // Skip all threads that are not waiting for this mutex.
                while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
                {
                    currentNode = currentNode.Next;
                }
                if (currentNode == null)
                {
                    break;
                }
                // Capture the successor before unlinking the node.
                LinkedListNode<KThread> nextNode = currentNode.Next;
                _mutexWaiters.Remove(currentNode);
                currentNode.Value.MutexOwner = newMutexOwner;
                if (newMutexOwner != null)
                {
                    // New owner was already selected, re-insert on new owner list.
                    newMutexOwner.AddToMutexWaitersList(currentNode.Value);
                }
                else
                {
                    // New owner not selected yet, use current thread.
                    newMutexOwner = currentNode.Value;
                }
                count++;
                currentNode = nextNode;
            }
            while (currentNode != null);
            if (newMutexOwner != null)
            {
                // Both this thread (lost waiters) and the new owner (gained
                // waiters) need their inherited priorities recomputed.
                UpdatePriorityInheritance();
                newMutexOwner.UpdatePriorityInheritance();
            }
            return newMutexOwner;
        }
        /// <summary>
        /// Recomputes this thread's dynamic priority from its base priority and
        /// the highest-priority mutex waiter, propagating the change up the chain
        /// of mutex owners.
        /// </summary>
        private void UpdatePriorityInheritance()
        {
            // If any of the threads waiting for the mutex has
            // higher priority than the current thread, then
            // the current thread inherits that priority.
            int highestPriority = BasePriority;
            if (_mutexWaiters.First != null)
            {
                // The waiter list is sorted, so only the head needs checking
                // (lower value == higher priority).
                int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;
                if (waitingDynamicPriority < highestPriority)
                {
                    highestPriority = waitingDynamicPriority;
                }
            }
            if (highestPriority != DynamicPriority)
            {
                int oldPriority = DynamicPriority;
                DynamicPriority = highestPriority;
                AdjustSchedulingForNewPriority(oldPriority);
                if (MutexOwner != null)
                {
                    // Remove and re-insert to ensure proper sorting based on new priority.
                    MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);
                    MutexOwner.AddToMutexWaitersList(this);
                    MutexOwner.UpdatePriorityInheritance();
                }
            }
        }
  699. private void AddToMutexWaitersList(KThread thread)
  700. {
  701. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  702. int currentPriority = thread.DynamicPriority;
  703. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  704. {
  705. nextPrio = nextPrio.Next;
  706. }
  707. if (nextPrio != null)
  708. {
  709. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  710. }
  711. else
  712. {
  713. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  714. }
  715. }
        /// <summary>
        /// Updates the per-core scheduling queues after a SchedFlags transition:
        /// removes the thread from the queues when it stops running, re-adds it
        /// when it becomes runnable.
        /// </summary>
        private void AdjustScheduling(ThreadSchedState oldFlags)
        {
            if (oldFlags == SchedFlags)
            {
                return;
            }
            if (oldFlags == ThreadSchedState.Running)
            {
                // Was running, now it's stopped.
                if (CurrentCore >= 0)
                {
                    _schedulingData.Unschedule(DynamicPriority, CurrentCore, this);
                }
                // Remove the thread as a suggestion on every other allowed core.
                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        _schedulingData.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }
            else if (SchedFlags == ThreadSchedState.Running)
            {
                // Was stopped, now it's running.
                if (CurrentCore >= 0)
                {
                    _schedulingData.Schedule(DynamicPriority, CurrentCore, this);
                }
                // Suggest the thread on every other allowed core.
                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        _schedulingData.Suggest(DynamicPriority, core, this);
                    }
                }
            }
            _scheduler.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Moves a running thread between priority queues after a priority change.
        /// The current thread is prepended (keeps running); others are appended.
        /// </summary>
        private void AdjustSchedulingForNewPriority(int oldPriority)
        {
            if (SchedFlags != ThreadSchedState.Running)
            {
                return;
            }
            // Remove thread from the old priority queues.
            if (CurrentCore >= 0)
            {
                _schedulingData.Unschedule(oldPriority, CurrentCore, this);
            }
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
                {
                    _schedulingData.Unsuggest(oldPriority, core, this);
                }
            }
            // Add thread to the new priority queues.
            KThread currentThread = _scheduler.GetCurrentThread();
            if (CurrentCore >= 0)
            {
                if (currentThread == this)
                {
                    // Keep the running thread at the front of its new queue.
                    _schedulingData.SchedulePrepend(DynamicPriority, CurrentCore, this);
                }
                else
                {
                    _schedulingData.Schedule(DynamicPriority, CurrentCore, this);
                }
            }
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != CurrentCore && ((AffinityMask >> core) & 1) != 0)
                {
                    _schedulingData.Suggest(DynamicPriority, core, this);
                }
            }
            _scheduler.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Re-homes a running thread across the per-core queues after its affinity
        /// mask changed: scheduled on its (possibly new) current core, suggested on
        /// every other allowed core.
        /// </summary>
        private void AdjustSchedulingForNewAffinity(long oldAffinityMask, int oldCore)
        {
            if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount)
            {
                return;
            }
            // Remove thread from the old priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((oldAffinityMask >> core) & 1) != 0)
                {
                    if (core == oldCore)
                    {
                        _schedulingData.Unschedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        _schedulingData.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }
            // Add thread to the new priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((AffinityMask >> core) & 1) != 0)
                {
                    if (core == CurrentCore)
                    {
                        _schedulingData.Schedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        _schedulingData.Suggest(DynamicPriority, core, this);
                    }
                }
            }
            _scheduler.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Sets the guest entry arguments: X0 = user argument, X1 = thread handle.
        /// </summary>
        public void SetEntryArguments(long argsPtr, int threadHandle)
        {
            Context.SetX(0, (ulong)argsPtr);
            Context.SetX(1, (ulong)threadHandle);
        }
        /// <summary>
        /// IKFutureSchedulerObject callback: the sleep/wait timeout expired,
        /// so resume the thread.
        /// </summary>
        public void TimeUp()
        {
            ReleaseAndResume();
        }
        /// <summary>
        /// Returns the guest stack trace for this thread's CPU context,
        /// as produced by the owner process debugger.
        /// </summary>
        public string GetGuestStackTrace()
        {
            return Owner.Debugger.GetGuestStackTrace(Context);
        }
        /// <summary>
        /// Logs the guest stack trace at Info level (CPU log class).
        /// </summary>
        public void PrintGuestStackTrace()
        {
            Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
        }
  849. public void Execute()
  850. {
  851. if (Interlocked.CompareExchange(ref _hostThreadRunning, 1, 0) == 0)
  852. {
  853. HostThread.Start();
  854. }
  855. }
        /// <summary>
        /// Host thread body: runs the guest code at the entrypoint until it exits,
        /// then tears down the kernel-side thread state and CPU context.
        /// </summary>
        private void ThreadStart(ulong entrypoint)
        {
            Owner.CpuContext.Execute(Context, entrypoint);
            ThreadExit();
            Context.Dispose();
        }
        /// <summary>
        /// Detaches the thread from the scheduler after guest execution ends.
        /// </summary>
        private void ThreadExit()
        {
            KernelContext.Scheduler.ExitThread(this);
            KernelContext.Scheduler.RemoveThread(this);
        }
  867. public bool IsCurrentHostThread()
  868. {
  869. return Thread.CurrentThread == HostThread;
  870. }
  871. public override bool IsSignaled()
  872. {
  873. return _hasExited;
  874. }
        /// <summary>
        /// Final cleanup when the last reference is dropped: frees TLS/waiters and
        /// returns the thread slot to the appropriate resource limit, unless Exit()
        /// already released it.
        /// </summary>
        protected override void Destroy()
        {
            if (_hasBeenInitialized)
            {
                FreeResources();
                // Owned threads are released on Exit(); only truly unreleased
                // (ownerless, never-exited) threads still hold a count here.
                bool released = Owner != null || _hasBeenReleased;
                if (Owner != null)
                {
                    Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                    Owner.DecrementReferenceCount();
                }
                else
                {
                    KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                }
            }
        }
        /// <summary>
        /// Releases per-thread resources: detaches from the owner process, frees
        /// the TLS slot and wakes any threads still waiting on a mutex we hold.
        /// </summary>
        private void FreeResources()
        {
            Owner?.RemoveThread(this);
            // _tlsAddress is only non-zero for user threads, which always have an owner.
            if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
            {
                throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
            }
            KernelContext.CriticalSection.Enter();
            // Wake up all threads that may be waiting for a mutex being held by this thread.
            foreach (KThread thread in _mutexWaiters)
            {
                thread.MutexOwner = null;
                // NOTE(review): resetting the waiter's core override here looks
                // unrelated to mutex teardown — confirm against the HOS kernel.
                thread._preferredCoreOverride = 0;
                thread.ObjSyncResult = KernelResult.InvalidState;
                thread.ReleaseAndResume();
            }
            KernelContext.CriticalSection.Leave();
            Owner?.DecrementThreadCountAndTerminateIfZero();
        }
  911. }
  912. }