// KThread.cs
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using System;
  6. using System.Collections.Generic;
  7. using System.Numerics;
  8. using System.Threading;
  9. namespace Ryujinx.HLE.HOS.Kernel.Threading
  10. {
  11. class KThread : KSynchronizationObject, IKFutureSchedulerObject
  12. {
// Offsets into the user TLS entry (relative to TlsAddress) used by the guest;
// values mirror the layout the guest code expects — TODO confirm against HOS docs.
private const int TlsUserDisableCountOffset = 0x100;
private const int TlsUserInterruptFlagOffset = 0x102;

// Maximum number of objects a single WaitSynchronization call can wait on.
public const int MaxWaitSyncObjects = 64;

// Event the scheduler sets to let the host thread run; created lazily in StartHostThread.
private ManualResetEvent _schedulerWaitEvent;

public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;

// Host-side thread that actually executes this guest/HLE thread.
public Thread HostThread { get; private set; }

// CPU emulator execution state (registers, etc.) for this thread.
public ARMeilleure.State.ExecutionContext Context { get; private set; }

public KThreadContext ThreadContext { get; private set; }

// Effective priority after inheritance; BasePriority is the assigned one.
public int DynamicPriority { get; set; }

// Bitmask of cores this thread may run on (bit N = core N).
public long AffinityMask { get; set; }

public long ThreadUid { get; private set; }

// Accumulated CPU time, updated via Interlocked in AddCpuTime.
private long _totalTimeRunning;

public long TotalTimeRunning => _totalTimeRunning;

// Object that satisfied the last synchronization wait, if any.
public KSynchronizationObject SignaledObj { get; set; }

// Addresses used while blocked on a condition variable / mutex.
public ulong CondVarAddress { get; set; }

private ulong _entrypoint;
private ThreadStart _customThreadStart;
private bool _forcedUnschedulable;

// A thread is scheduled by the kernel scheduler only when it runs guest code
// (no custom start routine) and was not explicitly made unschedulable.
public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;

public ulong MutexAddress { get; set; }

public int KernelWaitersCount { get; private set; }

// Owning process; null for kernel-internal threads (see Initialize).
public KProcess Owner { get; private set; }

private ulong _tlsAddress;

public ulong TlsAddress => _tlsAddress;

// Fixed-size scratch arrays for WaitSynchronization, sized MaxWaitSyncObjects.
public KSynchronizationObject[] WaitSyncObjects { get; }
public int[] WaitSyncHandles { get; }

public long LastScheduledTime { get; set; }

// Per-core linked-list nodes used by the priority queues.
public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

// List currently withholding this thread while it is paused waiting on sync.
public LinkedList<KThread> Withholder { get; set; }
public LinkedListNode<KThread> WithholderNode { get; set; }

public LinkedListNode<KThread> ProcessListNode { get; set; }

// Threads waiting on a mutex owned by this thread, sorted by dynamic priority.
private LinkedList<KThread> _mutexWaiters;
private LinkedListNode<KThread> _mutexWaiterNode;

// Threads paused waiting for this (pinned) thread; resumed elsewhere.
private LinkedList<KThread> _pinnedWaiters;

public KThread MutexOwner { get; private set; }

public int ThreadHandleForUserMutex { get; set; }

// Requested pause reasons, masked by what this thread currently permits.
private ThreadSchedState _forcePauseFlags;
private ThreadSchedState _forcePausePermissionFlags;

public KernelResult ObjSyncResult { get; set; }

public int BasePriority { get; set; }
public int PreferredCore { get; set; }

public int CurrentCore { get; set; }
public int ActiveCore { get; set; }

public bool IsPinned { get; private set; }

// Saved values while pinned / core migration is disabled.
private long _originalAffinityMask;
private int _originalPreferredCore;
private int _originalBasePriority;
private int _coreMigrationDisableCount;

public ThreadSchedState SchedFlags { get; private set; }

// int-backed so it can be used with Interlocked.CompareExchange.
private int _shallBeTerminated;

public bool ShallBeTerminated
{
    get => _shallBeTerminated != 0;
    set => _shallBeTerminated = value ? 1 : 0;
}

public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;

public bool SyncCancelled { get; set; }
public bool WaitingSync { get; set; }

private int _hasExited;
private bool _hasBeenInitialized;
private bool _hasBeenReleased;

public bool WaitingInArbitration { get; set; }

public long LastPc { get; set; }

// Serializes SetActivity / SetCoreAndAffinityMask (NOTE(review): could be readonly).
private object ActivityOperationLock = new object();
  77. public KThread(KernelContext context) : base(context)
  78. {
  79. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  80. WaitSyncHandles = new int[MaxWaitSyncObjects];
  81. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  82. _mutexWaiters = new LinkedList<KThread>();
  83. _pinnedWaiters = new LinkedList<KThread>();
  84. }
/// <summary>
/// Initializes the thread: scheduling state, CPU context, TLS (for user threads)
/// and process bookkeeping. Must be called before Start.
/// </summary>
/// <param name="entrypoint">Guest entrypoint address.</param>
/// <param name="argsPtr">Value placed in X0 (thread argument).</param>
/// <param name="stackTop">Initial stack pointer (SP/X31 on 64-bit, R13 on 32-bit).</param>
/// <param name="priority">Initial base and dynamic priority.</param>
/// <param name="cpuCore">Preferred/initial core; also seeds the affinity mask.</param>
/// <param name="owner">Owning process; may be null for kernel-internal threads.</param>
/// <param name="type">Thread type; only values 0..3 are accepted.</param>
/// <param name="customThreadStart">Optional host-side routine run instead of guest code.</param>
/// <returns>Success, or OutOfMemory if TLS allocation failed.</returns>
public KernelResult Initialize(
    ulong entrypoint,
    ulong argsPtr,
    ulong stackTop,
    int priority,
    int cpuCore,
    KProcess owner,
    ThreadType type,
    ThreadStart customThreadStart = null)
{
    // ThreadType is assumed to have exactly four valid values (0..3).
    if ((uint)type > 3)
    {
        throw new ArgumentException($"Invalid thread type \"{type}\".");
    }
    ThreadContext = new KThreadContext();
    PreferredCore = cpuCore;
    AffinityMask |= 1L << cpuCore;
    // Dummy threads never go through the scheduler, so mark them running immediately.
    SchedFlags = type == ThreadType.Dummy
        ? ThreadSchedState.Running
        : ThreadSchedState.None;
    ActiveCore = cpuCore;
    ObjSyncResult = KernelResult.ThreadNotStarted;
    DynamicPriority = priority;
    BasePriority = priority;
    CurrentCore = cpuCore;
    IsPinned = false;
    _entrypoint = entrypoint;
    _customThreadStart = customThreadStart;
    if (type == ThreadType.User)
    {
        // NOTE(review): owner is dereferenced here before the null check below —
        // user threads are presumably always created with a non-null owner; confirm.
        if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
        {
            return KernelResult.OutOfMemory;
        }
        MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
    }
    bool is64Bits;
    if (owner != null)
    {
        Owner = owner;
        owner.IncrementReferenceCount();
        owner.IncrementThreadCount();
        is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
    }
    else
    {
        // Kernel-internal threads default to 64-bit.
        is64Bits = true;
    }
    HostThread = new Thread(ThreadStart);
    Context = CpuContext.CreateExecutionContext();
    Context.IsAarch32 = !is64Bits;
    Context.SetX(0, argsPtr);
    if (is64Bits)
    {
        // X18 is seeded with a random value forced odd; X31 is the stack pointer.
        Context.SetX(18, KSystemControl.GenerateRandom() | 1);
        Context.SetX(31, stackTop);
    }
    else
    {
        // AArch32: R13 is the stack pointer.
        Context.SetX(13, (uint)stackTop);
    }
    // 19.2 MHz counter frequency (CNTFRQ_EL0); TPIDR points at the thread's TLS.
    Context.CntfrqEl0 = 19200000;
    Context.Tpidr = (long)_tlsAddress;
    ThreadUid = KernelContext.NewThreadUid();
    HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";
    _hasBeenInitialized = true;
    _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask;
    if (owner != null)
    {
        owner.SubscribeThreadEventHandlers(Context);
        owner.AddThread(this);
        // If the owner process is paused, the new thread must start paused too.
        if (owner.IsPaused)
        {
            KernelContext.CriticalSection.Enter();
            if (TerminationRequested)
            {
                KernelContext.CriticalSection.Leave();
                return KernelResult.Success;
            }
            _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
            CombineForcePauseFlags();
            KernelContext.CriticalSection.Leave();
        }
    }
    return KernelResult.Success;
}
/// <summary>
/// Transitions the thread to the Running state and starts the host thread.
/// If the kernel is not fully initialized yet, the thread starts force-paused.
/// </summary>
/// <returns>
/// Success if the thread was started; InvalidState if it was already started;
/// ThreadTerminating if this or the current thread is being terminated.
/// </returns>
public KernelResult Start()
{
    if (!KernelContext.KernelInitialized)
    {
        KernelContext.CriticalSection.Enter();
        if (!TerminationRequested)
        {
            _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;
            CombineForcePauseFlags();
        }
        KernelContext.CriticalSection.Leave();
    }
    KernelResult result = KernelResult.ThreadTerminating;
    KernelContext.CriticalSection.Enter();
    if (!ShallBeTerminated)
    {
        KThread currentThread = KernelStatic.GetCurrentThread();
        // Retry while the caller is force-paused: each iteration briefly leaves
        // the critical section so the pause can take effect, then re-checks.
        while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
        {
            // Already started (low state is not None) — nothing to do.
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
            {
                result = KernelResult.InvalidState;
                break;
            }
            if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
            {
                if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                {
                    CombineForcePauseFlags();
                }
                SetNewSchedFlags(ThreadSchedState.Running);
                StartHostThread();
                result = KernelResult.Success;
                break;
            }
            else
            {
                // The current thread is force-paused: apply its pause, yield the
                // critical section so it can actually pause, then loop again.
                currentThread.CombineForcePauseFlags();
                KernelContext.CriticalSection.Leave();
                KernelContext.CriticalSection.Enter();
                if (currentThread.ShallBeTerminated)
                {
                    break;
                }
            }
        }
    }
    KernelContext.CriticalSection.Leave();
    return result;
}
/// <summary>
/// Flags the thread for termination and kicks it out of any wait it is in.
/// Only the first caller (CAS on _shallBeTerminated) performs the transition.
/// </summary>
/// <returns>The low (run-state) nibble of the scheduling flags after the request.</returns>
public ThreadSchedState PrepareForTermination()
{
    KernelContext.CriticalSection.Enter();
    // If this thread is pinned by the caller's core, unpin it first.
    if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this)
    {
        Owner.UnpinThread(this);
    }
    ThreadSchedState result;
    // Only the first termination request does the work below.
    if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
    {
        if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
        {
            // Never started: it can go straight to TerminationPending.
            SchedFlags = ThreadSchedState.TerminationPending;
        }
        else
        {
            // Drop any thread-pause so the thread can run to its termination point.
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;
                ThreadSchedState oldSchedFlags = SchedFlags;
                SchedFlags &= ThreadSchedState.LowMask;
                AdjustScheduling(oldSchedFlags);
            }
            // Boost priority so the dying thread gets scheduled promptly.
            if (BasePriority >= 0x10)
            {
                SetPriority(0xF);
            }
            if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
            {
                // TODO: GIC distributor stuffs (sgir changes ect)
                Context.RequestInterrupt();
            }
            // Wake the thread from any synchronization wait with a terminating result.
            SignaledObj = null;
            ObjSyncResult = KernelResult.ThreadTerminating;
            ReleaseAndResume();
        }
    }
    result = SchedFlags;
    KernelContext.CriticalSection.Leave();
    return result & ThreadSchedState.LowMask;
}
  262. public void Terminate()
  263. {
  264. ThreadSchedState state = PrepareForTermination();
  265. if (state != ThreadSchedState.TerminationPending)
  266. {
  267. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  268. }
  269. }
/// <summary>
/// Runs after every syscall: exits the thread if termination was requested,
/// otherwise re-applies any pending force-pause flags. Loops while the state
/// keeps coming back as TerminationPending.
/// </summary>
public void HandlePostSyscall()
{
    ThreadSchedState state;
    do
    {
        if (TerminationRequested)
        {
            Exit();
            // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
            break;
        }
        KernelContext.CriticalSection.Enter();
        // Re-check under the critical section; the request may have raced in.
        if (TerminationRequested)
        {
            state = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                CombineForcePauseFlags();
            }
            state = ThreadSchedState.Running;
        }
        KernelContext.CriticalSection.Leave();
    } while (state == ThreadSchedState.TerminationPending);
}
/// <summary>
/// Performs the thread's own exit: releases its resource-limit slot, clears
/// pause state, marks it terminated, and stops the CPU context. Drops a
/// reference if this call was the one that flipped the exited flag.
/// </summary>
public void Exit()
{
    // TODO: Debug event.
    if (Owner != null)
    {
        Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);
        _hasBeenReleased = true;
    }
    KernelContext.CriticalSection.Enter();
    // No pause can apply anymore; forbid future force-pauses entirely.
    _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
    _forcePausePermissionFlags = 0;
    bool decRef = ExitImpl();
    Context.StopRunning();
    KernelContext.CriticalSection.Leave();
    if (decRef)
    {
        DecrementReferenceCount();
    }
}
  316. private bool ExitImpl()
  317. {
  318. KernelContext.CriticalSection.Enter();
  319. SetNewSchedFlags(ThreadSchedState.TerminationPending);
  320. bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;
  321. Signal();
  322. KernelContext.CriticalSection.Leave();
  323. return decRef;
  324. }
  325. private int GetEffectiveRunningCore()
  326. {
  327. for (int coreNumber = 0; coreNumber < KScheduler.CpuCoresCount; coreNumber++)
  328. {
  329. if (KernelContext.Schedulers[coreNumber].CurrentThread == this)
  330. {
  331. return coreNumber;
  332. }
  333. }
  334. return -1;
  335. }
  336. public KernelResult Sleep(long timeout)
  337. {
  338. KernelContext.CriticalSection.Enter();
  339. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  340. {
  341. KernelContext.CriticalSection.Leave();
  342. return KernelResult.ThreadTerminating;
  343. }
  344. SetNewSchedFlags(ThreadSchedState.Paused);
  345. if (timeout > 0)
  346. {
  347. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  348. }
  349. KernelContext.CriticalSection.Leave();
  350. if (timeout > 0)
  351. {
  352. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  353. }
  354. return 0;
  355. }
  356. public void SetPriority(int priority)
  357. {
  358. KernelContext.CriticalSection.Enter();
  359. if (IsPinned)
  360. {
  361. _originalBasePriority = priority;
  362. }
  363. else
  364. {
  365. BasePriority = priority;
  366. }
  367. UpdatePriorityInheritance();
  368. KernelContext.CriticalSection.Leave();
  369. }
  370. public void Suspend(ThreadSchedState type)
  371. {
  372. _forcePauseFlags |= type;
  373. CombineForcePauseFlags();
  374. }
  375. public void Resume(ThreadSchedState type)
  376. {
  377. ThreadSchedState oldForcePauseFlags = _forcePauseFlags;
  378. _forcePauseFlags &= ~type;
  379. if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
  380. {
  381. ThreadSchedState oldSchedFlags = SchedFlags;
  382. SchedFlags &= ThreadSchedState.LowMask;
  383. AdjustScheduling(oldSchedFlags);
  384. }
  385. }
/// <summary>
/// Pauses or unpauses the thread (svcSetThreadActivity). On pause, also waits
/// until the target thread is actually off-core before returning.
/// </summary>
/// <param name="pause">True to pause, false to resume.</param>
/// <returns>
/// Success; InvalidState if the thread is in a non-pausable state or already
/// in the requested state; ThreadTerminating if the caller is being terminated.
/// </returns>
public KernelResult SetActivity(bool pause)
{
    lock (ActivityOperationLock)
    {
        KernelResult result = KernelResult.Success;
        KernelContext.CriticalSection.Enter();
        ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
        // Only Paused/Running threads can have their activity changed.
        if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
        {
            KernelContext.CriticalSection.Leave();
            return KernelResult.InvalidState;
        }
        if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
        {
            if (pause)
            {
                // Pause, the force pause flag should be clear (thread is NOT paused).
                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                {
                    Suspend(ThreadSchedState.ThreadPauseFlag);
                }
                else
                {
                    result = KernelResult.InvalidState;
                }
            }
            else
            {
                // Unpause, the force pause flag should be set (thread is paused).
                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                {
                    Resume(ThreadSchedState.ThreadPauseFlag);
                }
                else
                {
                    result = KernelResult.InvalidState;
                }
            }
        }
        KernelContext.CriticalSection.Leave();
        // After a successful pause, spin until the target is no longer running
        // on any core (or park the caller if the target is pinned).
        if (result == KernelResult.Success && pause)
        {
            bool isThreadRunning = true;
            while (isThreadRunning)
            {
                KernelContext.CriticalSection.Enter();
                if (TerminationRequested)
                {
                    KernelContext.CriticalSection.Leave();
                    break;
                }
                isThreadRunning = false;
                if (IsPinned)
                {
                    // Pinned target: the caller waits on the pinned-waiters list
                    // instead of polling the running core.
                    KThread currentThread = KernelStatic.GetCurrentThread();
                    if (currentThread.TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();
                        result = KernelResult.ThreadTerminating;
                        break;
                    }
                    _pinnedWaiters.AddLast(currentThread);
                    currentThread.Reschedule(ThreadSchedState.Paused);
                }
                else
                {
                    isThreadRunning = GetEffectiveRunningCore() >= 0;
                }
                KernelContext.CriticalSection.Leave();
            }
        }
        return result;
    }
}
/// <summary>
/// Cancels an in-progress synchronization wait (svcCancelSynchronization).
/// If the thread is not currently paused in a sync wait, the cancellation is
/// latched in SyncCancelled for the next wait to observe.
/// </summary>
public void CancelSynchronization()
{
    KernelContext.CriticalSection.Enter();
    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
    {
        // Not blocked in a sync wait right now; remember the request.
        SyncCancelled = true;
    }
    else if (Withholder != null)
    {
        // Thread is parked on a withholder list: detach and resume it.
        Withholder.Remove(WithholderNode);
        SetNewSchedFlags(ThreadSchedState.Running);
        Withholder = null;
        SyncCancelled = true;
    }
    else
    {
        // Thread is blocked in the wait itself: resume it with a Cancelled result.
        SignaledObj = null;
        ObjSyncResult = KernelResult.Cancelled;
        SetNewSchedFlags(ThreadSchedState.Running);
        SyncCancelled = false;
    }
    KernelContext.CriticalSection.Leave();
}
  483. public KernelResult SetCoreAndAffinityMask(int newCore, long newAffinityMask)
  484. {
  485. lock (ActivityOperationLock)
  486. {
  487. KernelContext.CriticalSection.Enter();
  488. bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0;
  489. // The value -3 is "do not change the preferred core".
  490. if (newCore == -3)
  491. {
  492. newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore;
  493. if ((newAffinityMask & (1 << newCore)) == 0)
  494. {
  495. KernelContext.CriticalSection.Leave();
  496. return KernelResult.InvalidCombination;
  497. }
  498. }
  499. if (isCoreMigrationDisabled)
  500. {
  501. _originalPreferredCore = newCore;
  502. _originalAffinityMask = newAffinityMask;
  503. }
  504. else
  505. {
  506. long oldAffinityMask = AffinityMask;
  507. PreferredCore = newCore;
  508. AffinityMask = newAffinityMask;
  509. if (oldAffinityMask != newAffinityMask)
  510. {
  511. int oldCore = ActiveCore;
  512. if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
  513. {
  514. if (PreferredCore < 0)
  515. {
  516. ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
  517. }
  518. else
  519. {
  520. ActiveCore = PreferredCore;
  521. }
  522. }
  523. AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
  524. }
  525. }
  526. KernelContext.CriticalSection.Leave();
  527. bool targetThreadPinned = true;
  528. while (targetThreadPinned)
  529. {
  530. KernelContext.CriticalSection.Enter();
  531. if (TerminationRequested)
  532. {
  533. KernelContext.CriticalSection.Leave();
  534. break;
  535. }
  536. targetThreadPinned = false;
  537. int coreNumber = GetEffectiveRunningCore();
  538. bool isPinnedThreadCurrentlyRunning = coreNumber >= 0;
  539. if (isPinnedThreadCurrentlyRunning && ((1 << coreNumber) & AffinityMask) == 0)
  540. {
  541. if (IsPinned)
  542. {
  543. KThread currentThread = KernelStatic.GetCurrentThread();
  544. if (currentThread.TerminationRequested)
  545. {
  546. KernelContext.CriticalSection.Leave();
  547. return KernelResult.ThreadTerminating;
  548. }
  549. _pinnedWaiters.AddLast(currentThread);
  550. currentThread.Reschedule(ThreadSchedState.Paused);
  551. }
  552. else
  553. {
  554. targetThreadPinned = true;
  555. }
  556. }
  557. KernelContext.CriticalSection.Leave();
  558. }
  559. return KernelResult.Success;
  560. }
  561. }
  562. private void CombineForcePauseFlags()
  563. {
  564. ThreadSchedState oldFlags = SchedFlags;
  565. ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
  566. SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags);
  567. AdjustScheduling(oldFlags);
  568. }
  569. private void SetNewSchedFlags(ThreadSchedState newFlags)
  570. {
  571. KernelContext.CriticalSection.Enter();
  572. ThreadSchedState oldFlags = SchedFlags;
  573. SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;
  574. if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
  575. {
  576. AdjustScheduling(oldFlags);
  577. }
  578. KernelContext.CriticalSection.Leave();
  579. }
  580. public void ReleaseAndResume()
  581. {
  582. KernelContext.CriticalSection.Enter();
  583. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
  584. {
  585. if (Withholder != null)
  586. {
  587. Withholder.Remove(WithholderNode);
  588. SetNewSchedFlags(ThreadSchedState.Running);
  589. Withholder = null;
  590. }
  591. else
  592. {
  593. SetNewSchedFlags(ThreadSchedState.Running);
  594. }
  595. }
  596. KernelContext.CriticalSection.Leave();
  597. }
  598. public void Reschedule(ThreadSchedState newFlags)
  599. {
  600. KernelContext.CriticalSection.Enter();
  601. ThreadSchedState oldFlags = SchedFlags;
  602. SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
  603. (newFlags & ThreadSchedState.LowMask);
  604. AdjustScheduling(oldFlags);
  605. KernelContext.CriticalSection.Leave();
  606. }
  607. public void AddMutexWaiter(KThread requester)
  608. {
  609. AddToMutexWaitersList(requester);
  610. requester.MutexOwner = this;
  611. UpdatePriorityInheritance();
  612. }
  613. public void RemoveMutexWaiter(KThread thread)
  614. {
  615. if (thread._mutexWaiterNode?.List != null)
  616. {
  617. _mutexWaiters.Remove(thread._mutexWaiterNode);
  618. }
  619. thread.MutexOwner = null;
  620. UpdatePriorityInheritance();
  621. }
/// <summary>
/// Hands a mutex off: picks the highest-priority waiter on the given address
/// as the new owner, moves the remaining waiters for that address onto the
/// new owner's list, and recomputes priority inheritance on both threads.
/// </summary>
/// <param name="mutexAddress">Guest address of the mutex being released.</param>
/// <param name="count">Number of threads that were waiting on this address.</param>
/// <returns>The new owner thread, or null if nobody was waiting.</returns>
public KThread RelinquishMutex(ulong mutexAddress, out int count)
{
    count = 0;
    if (_mutexWaiters.First == null)
    {
        return null;
    }
    KThread newMutexOwner = null;
    LinkedListNode<KThread> currentNode = _mutexWaiters.First;
    do
    {
        // Skip all threads that are not waiting for this mutex.
        while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
        {
            currentNode = currentNode.Next;
        }
        if (currentNode == null)
        {
            break;
        }
        // Capture the next node before removing; Remove detaches currentNode.
        LinkedListNode<KThread> nextNode = currentNode.Next;
        _mutexWaiters.Remove(currentNode);
        currentNode.Value.MutexOwner = newMutexOwner;
        if (newMutexOwner != null)
        {
            // New owner was already selected, re-insert on new owner list.
            newMutexOwner.AddToMutexWaitersList(currentNode.Value);
        }
        else
        {
            // New owner not selected yet, use current thread.
            // (The list is priority-sorted, so the first match has the highest priority.)
            newMutexOwner = currentNode.Value;
        }
        count++;
        currentNode = nextNode;
    }
    while (currentNode != null);
    if (newMutexOwner != null)
    {
        // Both this thread (lost waiters) and the new owner (gained waiters)
        // need their inherited priority recomputed.
        UpdatePriorityInheritance();
        newMutexOwner.UpdatePriorityInheritance();
    }
    return newMutexOwner;
}
/// <summary>
/// Recomputes this thread's dynamic priority from its base priority and the
/// highest-priority mutex waiter, then propagates the change up the chain of
/// mutex owners (each hop re-sorts this thread in its owner's waiter list).
/// </summary>
private void UpdatePriorityInheritance()
{
    // If any of the threads waiting for the mutex has
    // higher priority than the current thread, then
    // the current thread inherits that priority.
    int highestPriority = BasePriority;
    if (_mutexWaiters.First != null)
    {
        // Waiter list is sorted, so the head has the best (lowest value) priority.
        int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;
        if (waitingDynamicPriority < highestPriority)
        {
            highestPriority = waitingDynamicPriority;
        }
    }
    if (highestPriority != DynamicPriority)
    {
        int oldPriority = DynamicPriority;
        DynamicPriority = highestPriority;
        AdjustSchedulingForNewPriority(oldPriority);
        if (MutexOwner != null)
        {
            // Remove and re-insert to ensure proper sorting based on new priority.
            MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);
            MutexOwner.AddToMutexWaitersList(this);
            // Recurse up the ownership chain so the owner inherits too.
            MutexOwner.UpdatePriorityInheritance();
        }
    }
}
  694. private void AddToMutexWaitersList(KThread thread)
  695. {
  696. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  697. int currentPriority = thread.DynamicPriority;
  698. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  699. {
  700. nextPrio = nextPrio.Next;
  701. }
  702. if (nextPrio != null)
  703. {
  704. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  705. }
  706. else
  707. {
  708. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  709. }
  710. }
/// <summary>
/// Reacts to a scheduling-flags change: moves the thread in or out of the
/// priority queues (or, for unschedulable threads, directly gates the host
/// thread's wait event) and requests a thread reselection.
/// </summary>
/// <param name="oldFlags">Flags before the change; no-op if unchanged.</param>
private void AdjustScheduling(ThreadSchedState oldFlags)
{
    if (oldFlags == SchedFlags)
    {
        return;
    }
    if (!IsSchedulable)
    {
        if (!_forcedUnschedulable)
        {
            // Ensure our thread is running and we have an event.
            StartHostThread();
            // If the thread is not schedulable, we want to just run or pause
            // it directly as we don't care about priority or the core it is
            // running on in this case.
            if (SchedFlags == ThreadSchedState.Running)
            {
                _schedulerWaitEvent.Set();
            }
            else
            {
                _schedulerWaitEvent.Reset();
            }
        }
        return;
    }
    if (oldFlags == ThreadSchedState.Running)
    {
        // Was running, now it's stopped.
        if (ActiveCore >= 0)
        {
            KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
        }
        // Also drop the "suggested" entries on every other allowed core.
        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
            {
                KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
            }
        }
    }
    else if (SchedFlags == ThreadSchedState.Running)
    {
        // Was stopped, now it's running.
        if (ActiveCore >= 0)
        {
            KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
        }
        // Suggest the thread on every other allowed core.
        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
            {
                KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
            }
        }
    }
    KernelContext.ThreadReselectionRequested = true;
}
/// <summary>
/// Moves a running, schedulable thread from the priority queues of its old
/// priority to those of its new DynamicPriority, then requests reselection.
/// </summary>
/// <param name="oldPriority">Priority the queues currently file the thread under.</param>
private void AdjustSchedulingForNewPriority(int oldPriority)
{
    if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
    {
        return;
    }
    // Remove thread from the old priority queues.
    if (ActiveCore >= 0)
    {
        KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
    }
    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
        {
            KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
        }
    }
    // Add thread to the new priority queues.
    KThread currentThread = KernelStatic.GetCurrentThread();
    if (ActiveCore >= 0)
    {
        // The currently-running thread goes to the front of its new queue so it
        // keeps running; any other thread is appended normally.
        if (currentThread == this)
        {
            KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
        }
        else
        {
            KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
        }
    }
    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
        {
            KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
        }
    }
    KernelContext.ThreadReselectionRequested = true;
}
/// <summary>
/// Moves a running, schedulable thread between per-core priority queues after
/// its affinity mask (and possibly active core) changed.
/// </summary>
/// <param name="oldAffinityMask">Affinity mask before the change.</param>
/// <param name="oldCore">Active core before the change (scheduled, not suggested).</param>
private void AdjustSchedulingForNewAffinity(long oldAffinityMask, int oldCore)
{
    if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
    {
        return;
    }
    // Remove thread from the old priority queues.
    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (((oldAffinityMask >> core) & 1) != 0)
        {
            // The old active core held the scheduled entry; others held suggestions.
            if (core == oldCore)
            {
                KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
            }
            else
            {
                KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
            }
        }
    }
    // Add thread to the new priority queues.
    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (((AffinityMask >> core) & 1) != 0)
        {
            if (core == ActiveCore)
            {
                KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
            }
            else
            {
                KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
            }
        }
    }
    KernelContext.ThreadReselectionRequested = true;
}
  847. public void SetEntryArguments(long argsPtr, int threadHandle)
  848. {
  849. Context.SetX(0, (ulong)argsPtr);
  850. Context.SetX(1, (ulong)threadHandle);
  851. }
  852. public void TimeUp()
  853. {
  854. ReleaseAndResume();
  855. }
  856. public string GetGuestStackTrace()
  857. {
  858. return Owner.Debugger.GetGuestStackTrace(this);
  859. }
  860. public string GetGuestRegisterPrintout()
  861. {
  862. return Owner.Debugger.GetCpuRegisterPrintout(this);
  863. }
  864. public void PrintGuestStackTrace()
  865. {
  866. Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
  867. }
  868. public void PrintGuestRegisterPrintout()
  869. {
  870. Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n");
  871. }
  872. public void AddCpuTime(long ticks)
  873. {
  874. Interlocked.Add(ref _totalTimeRunning, ticks);
  875. }
/// <summary>
/// Starts the host thread backing this kernel thread, at most once.
/// </summary>
/// <remarks>
/// Uses Interlocked.Exchange on <c>_schedulerWaitEvent</c> as a one-time
/// latch: only the caller that installs the (initially unsignaled) event
/// starts the host thread; a caller that loses the race disposes its
/// redundant event. ThreadStart blocks on this event before executing.
/// </remarks>
public void StartHostThread()
{
    if (_schedulerWaitEvent == null)
    {
        var schedulerWaitEvent = new ManualResetEvent(false);

        // Exchange returns the previous value; null means we won the race
        // and are responsible for starting the host thread.
        if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
        {
            HostThread.Start();
        }
        else
        {
            // Another caller already installed the event and started the
            // thread; discard our unused event.
            schedulerWaitEvent.Dispose();
        }
    }
}
  891. private void ThreadStart()
  892. {
  893. _schedulerWaitEvent.WaitOne();
  894. KernelStatic.SetKernelContext(KernelContext, this);
  895. if (_customThreadStart != null)
  896. {
  897. _customThreadStart();
  898. }
  899. else
  900. {
  901. Owner.Context.Execute(Context, _entrypoint);
  902. }
  903. Context.Dispose();
  904. _schedulerWaitEvent.Dispose();
  905. }
  906. public void MakeUnschedulable()
  907. {
  908. _forcedUnschedulable = true;
  909. }
  910. public override bool IsSignaled()
  911. {
  912. return _hasExited != 0;
  913. }
  914. protected override void Destroy()
  915. {
  916. if (_hasBeenInitialized)
  917. {
  918. FreeResources();
  919. bool released = Owner != null || _hasBeenReleased;
  920. if (Owner != null)
  921. {
  922. Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);
  923. Owner.DecrementReferenceCount();
  924. }
  925. else
  926. {
  927. KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
  928. }
  929. }
  930. }
/// <summary>
/// Releases kernel resources held by this thread after it terminates:
/// detaches it from its owner, frees its TLS block, and wakes any threads
/// blocked on a mutex it was holding.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown if freeing the thread local storage block fails.
/// </exception>
private void FreeResources()
{
    Owner?.RemoveThread(this);

    // NOTE(review): Owner is dereferenced without a null check below; this
    // assumes _tlsAddress is only non-zero for threads that have an owner
    // process — TODO confirm against thread initialization.
    if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
    {
        throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
    }

    KernelContext.CriticalSection.Enter();

    // Wake up all threads that may be waiting for a mutex being held by this thread.
    foreach (KThread thread in _mutexWaiters)
    {
        thread.MutexOwner = null;
        thread._originalPreferredCore = 0;
        // Waiters resume with InvalidState since the mutex holder is gone.
        thread.ObjSyncResult = KernelResult.InvalidState;

        thread.ReleaseAndResume();
    }

    KernelContext.CriticalSection.Leave();

    Owner?.DecrementThreadCountAndTerminateIfZero();
}
/// <summary>
/// Pins the thread to the core it is currently executing on, saving its
/// previous core, affinity mask and base priority so <see cref="Unpin"/>
/// can restore them later.
/// </summary>
public void Pin()
{
    IsPinned = true;
    _coreMigrationDisableCount++;

    int activeCore = ActiveCore;

    // Save scheduling state for Unpin, then restrict the thread to the
    // single core it is running on right now.
    _originalPreferredCore = PreferredCore;
    _originalAffinityMask = AffinityMask;

    ActiveCore = CurrentCore;
    PreferredCore = CurrentCore;
    AffinityMask = 1 << CurrentCore;

    // Requeue only if the effective core or mask actually changed.
    if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask)
    {
        AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore);
    }

    _originalBasePriority = BasePriority;

    // Clamp base priority using a bound derived from the owner's
    // allowed-thread-priority capability mask.
    BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1);
    UpdatePriorityInheritance();

    // Disallows thread pausing
    _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag;

    CombineForcePauseFlags();

    // TODO: Assign reduced SVC permissions
}
/// <summary>
/// Reverts <see cref="Pin"/>: restores the saved preferred core, affinity
/// mask and base priority, re-enables pausing (unless terminating), and
/// wakes any threads that were waiting for the unpin.
/// </summary>
public void Unpin()
{
    IsPinned = false;
    _coreMigrationDisableCount--;

    long affinityMask = AffinityMask;
    int activeCore = ActiveCore;

    PreferredCore = _originalPreferredCore;
    AffinityMask = _originalAffinityMask;

    if (AffinityMask != affinityMask)
    {
        // NOTE(review): the active core is re-selected when the current core
        // IS still set in the restored mask; verify against the reference
        // kernel whether the intended condition is the opposite (migrate
        // only when the core is no longer allowed).
        if ((AffinityMask & 1 << ActiveCore) != 0)
        {
            if (PreferredCore >= 0)
            {
                ActiveCore = PreferredCore;
            }
            else
            {
                // No preferred core: pick the highest core set in the mask.
                ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
            }

            AdjustSchedulingForNewAffinity(affinityMask, activeCore);
        }
    }

    BasePriority = _originalBasePriority;
    UpdatePriorityInheritance();

    if (!TerminationRequested)
    {
        // Allows thread pausing
        _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag;

        CombineForcePauseFlags();

        // TODO: Restore SVC permissions
    }

    // Wake up waiters
    foreach (KThread waiter in _pinnedWaiters)
    {
        waiter.ReleaseAndResume();
    }

    _pinnedWaiters.Clear();
}
  1011. public void SynchronizePreemptionState()
  1012. {
  1013. KernelContext.CriticalSection.Enter();
  1014. if (Owner != null && Owner.PinnedThreads[CurrentCore] == this)
  1015. {
  1016. ClearUserInterruptFlag();
  1017. Owner.UnpinThread(this);
  1018. }
  1019. KernelContext.CriticalSection.Leave();
  1020. }
  1021. public ushort GetUserDisableCount()
  1022. {
  1023. return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset);
  1024. }
  1025. public void SetUserInterruptFlag()
  1026. {
  1027. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1);
  1028. }
  1029. public void ClearUserInterruptFlag()
  1030. {
  1031. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0);
  1032. }
  1033. }
  1034. }