KThread.cs 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using Ryujinx.HLE.HOS.Kernel.SupervisorCall;
  6. using Ryujinx.Horizon.Common;
  7. using System;
  8. using System.Collections.Generic;
  9. using System.Numerics;
  10. using System.Threading;
  11. namespace Ryujinx.HLE.HOS.Kernel.Threading
  12. {
  13. class KThread : KSynchronizationObject, IKFutureSchedulerObject
  14. {
        // Offsets within the thread's TLS entry of the user-mode
        // interrupt-disable counter and interrupt flag.
        private const int TlsUserDisableCountOffset = 0x100;
        private const int TlsUserInterruptFlagOffset = 0x102;
        // Maximum number of objects a single wait-synchronization call may target.
        public const int MaxWaitSyncObjects = 64;
        // Host-side event used to park/unpark this thread when it is selected
        // or deselected by the scheduler (see AdjustScheduling).
        private ManualResetEvent _schedulerWaitEvent;
        public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;
        public Thread HostThread { get; private set; }
        // Guest CPU state backing this kernel thread.
        public IExecutionContext Context { get; private set; }
        public KThreadContext ThreadContext { get; private set; }
        // Effective priority after inheritance; BasePriority holds the guest-set value.
        public int DynamicPriority { get; set; }
        public ulong AffinityMask { get; set; }
        public ulong ThreadUid { get; private set; }
        private long _totalTimeRunning;
        public long TotalTimeRunning => _totalTimeRunning;
        public KSynchronizationObject SignaledObj { get; set; }
        // Addresses of the condition variable / mutex the thread is currently
        // involved with, if any.
        public ulong CondVarAddress { get; set; }
        private ulong _entrypoint;
        private ThreadStart _customThreadStart;
        private bool _forcedUnschedulable;
        // Threads with a custom host entry point, or forced unschedulable, are
        // run/paused directly instead of going through the priority queues.
        public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;
        public ulong MutexAddress { get; set; }
        public int KernelWaitersCount { get; private set; }
        public KProcess Owner { get; private set; }
        private ulong _tlsAddress;
        public ulong TlsAddress => _tlsAddress;
        public KSynchronizationObject[] WaitSyncObjects { get; }
        public int[] WaitSyncHandles { get; }
        public long LastScheduledTime { get; set; }
        // Scheduling bookkeeping: per-core queue nodes and list memberships.
        public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }
        public LinkedList<KThread> Withholder { get; set; }
        public LinkedListNode<KThread> WithholderNode { get; set; }
        public LinkedListNode<KThread> ProcessListNode { get; set; }
        // Priority-sorted list of threads waiting on a mutex owned by this thread,
        // plus this thread's own node inside its mutex owner's list.
        private LinkedList<KThread> _mutexWaiters;
        private LinkedListNode<KThread> _mutexWaiterNode;
        // Threads that paused themselves while waiting on this pinned thread
        // (see SetActivity / SetCoreAndAffinityMask); resumed elsewhere — TODO confirm on unpin.
        private LinkedList<KThread> _pinnedWaiters;
        public KThread MutexOwner { get; private set; }
        public int ThreadHandleForUserMutex { get; set; }
        // Pending force-pause reasons, and the subset currently permitted to
        // take effect (combined into SchedFlags by CombineForcePauseFlags).
        private ThreadSchedState _forcePauseFlags;
        private ThreadSchedState _forcePausePermissionFlags;
        public Result ObjSyncResult { get; set; }
        public int BasePriority { get; set; }
        public int PreferredCore { get; set; }
        public int CurrentCore { get; set; }
        public int ActiveCore { get; set; }
        public bool IsPinned { get; private set; }
        // State saved while pinned / core migration is disabled, restored later.
        private ulong _originalAffinityMask;
        private int _originalPreferredCore;
        private int _originalBasePriority;
        private int _coreMigrationDisableCount;
        public ThreadSchedState SchedFlags { get; private set; }
        // Stored as int (not bool) so it can be updated atomically with Interlocked.
        private int _shallBeTerminated;
        public bool ShallBeTerminated
        {
            get => _shallBeTerminated != 0;
            set => _shallBeTerminated = value ? 1 : 0;
        }
        public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;
        public bool SyncCancelled { get; set; }
        public bool WaitingSync { get; set; }
        private int _hasExited;
        private bool _hasBeenInitialized;
        private bool _hasBeenReleased;
        public bool WaitingInArbitration { get; set; }
        // Serializes SetActivity, SetCoreAndAffinityMask and GetThreadContext3.
        private object _activityOperationLock;
  78. public KThread(KernelContext context) : base(context)
  79. {
  80. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  81. WaitSyncHandles = new int[MaxWaitSyncObjects];
  82. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  83. _mutexWaiters = new LinkedList<KThread>();
  84. _pinnedWaiters = new LinkedList<KThread>();
  85. _activityOperationLock = new object();
  86. }
        /// <summary>
        /// Initializes the thread: scheduling state, TLS (for user threads),
        /// guest CPU context and the backing host thread.
        /// </summary>
        /// <param name="entrypoint">Guest entry point address.</param>
        /// <param name="argsPtr">Value placed in X0/R0 for the entry point.</param>
        /// <param name="stackTop">Initial stack pointer value.</param>
        /// <param name="priority">Initial base and dynamic priority.</param>
        /// <param name="cpuCore">Preferred/initial core.</param>
        /// <param name="owner">Owning process, or null for kernel-internal threads.</param>
        /// <param name="type">Thread type; must be a valid <see cref="ThreadType"/> value.</param>
        /// <param name="customThreadStart">Optional host entry point; such threads are not schedulable.</param>
        /// <returns><see cref="Result.Success"/>, or <see cref="KernelResult.OutOfMemory"/> if TLS allocation fails.</returns>
        /// <exception cref="ArgumentException">Thrown when <paramref name="type"/> is out of range.</exception>
        public Result Initialize(
            ulong entrypoint,
            ulong argsPtr,
            ulong stackTop,
            int priority,
            int cpuCore,
            KProcess owner,
            ThreadType type,
            ThreadStart customThreadStart = null)
        {
            // Assumes ThreadType has exactly the values 0..3 — TODO confirm against the enum.
            if ((uint)type > 3)
            {
                throw new ArgumentException($"Invalid thread type \"{type}\".");
            }
            PreferredCore = cpuCore;
            AffinityMask |= 1UL << cpuCore;
            // Dummy threads are considered running immediately; all others start
            // in the None state until Start() is called.
            SchedFlags = type == ThreadType.Dummy
                ? ThreadSchedState.Running
                : ThreadSchedState.None;
            ActiveCore = cpuCore;
            ObjSyncResult = KernelResult.ThreadNotStarted;
            DynamicPriority = priority;
            BasePriority = priority;
            CurrentCore = cpuCore;
            IsPinned = false;
            _entrypoint = entrypoint;
            _customThreadStart = customThreadStart;
            // User threads get a zero-initialized TLS entry from the owner process.
            if (type == ThreadType.User)
            {
                if (owner.AllocateThreadLocalStorage(out _tlsAddress) != Result.Success)
                {
                    return KernelResult.OutOfMemory;
                }
                MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
            }
            bool is64Bits;
            if (owner != null)
            {
                Owner = owner;
                owner.IncrementReferenceCount();
                owner.IncrementThreadCount();
                is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
            }
            else
            {
                // Ownerless (kernel) threads are treated as 64-bit.
                is64Bits = true;
            }
            HostThread = new Thread(ThreadStart);
            Context = owner?.CreateExecutionContext() ?? new ProcessExecutionContext();
            ThreadContext = new KThreadContext(Context);
            Context.IsAarch32 = !is64Bits;
            Context.SetX(0, argsPtr);
            if (is64Bits)
            {
                // X18 is the platform register; seeded with a random odd value.
                Context.SetX(18, KSystemControl.GenerateRandom() | 1);
                // Index 31 is used as the stack pointer here — TODO confirm SetX(31) maps to SP.
                Context.SetX(31, stackTop);
            }
            else
            {
                // AArch32: R13 is the stack pointer.
                Context.SetX(13, (uint)stackTop);
            }
            Context.TpidrroEl0 = (long)_tlsAddress;
            ThreadUid = KernelContext.NewThreadUid();
            HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";
            _hasBeenInitialized = true;
            _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask;
            if (owner != null)
            {
                owner.AddThread(this);
                // If the owner process is paused, the new thread must start paused too.
                if (owner.IsPaused)
                {
                    KernelContext.CriticalSection.Enter();
                    if (TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();
                        return Result.Success;
                    }
                    _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
                    CombineForcePauseFlags();
                    KernelContext.CriticalSection.Leave();
                }
            }
            return Result.Success;
        }
        /// <summary>
        /// Transitions the thread from its initial state to Running and starts
        /// the backing host thread.
        /// </summary>
        /// <returns>
        /// <see cref="Result.Success"/> on success,
        /// <see cref="KernelResult.InvalidState"/> if the thread was already started, or
        /// <see cref="KernelResult.ThreadTerminating"/> if termination aborted the start.
        /// </returns>
        public Result Start()
        {
            // Until the kernel finishes initializing, new threads start force-paused.
            if (!KernelContext.KernelInitialized)
            {
                KernelContext.CriticalSection.Enter();
                if (!TerminationRequested)
                {
                    _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;
                    CombineForcePauseFlags();
                }
                KernelContext.CriticalSection.Leave();
            }
            Result result = KernelResult.ThreadTerminating;
            KernelContext.CriticalSection.Enter();
            if (!ShallBeTerminated)
            {
                KThread currentThread = KernelStatic.GetCurrentThread();
                while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
                {
                    // Any low state other than None means the thread was already started.
                    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
                    {
                        result = KernelResult.InvalidState;
                        break;
                    }
                    if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
                    {
                        if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                        {
                            CombineForcePauseFlags();
                        }
                        SetNewSchedFlags(ThreadSchedState.Running);
                        StartHostThread();
                        result = Result.Success;
                        break;
                    }
                    else
                    {
                        // The calling thread has a pending force-pause: apply it and
                        // bounce through the critical section so it actually pauses
                        // before retrying the start.
                        currentThread.CombineForcePauseFlags();
                        KernelContext.CriticalSection.Leave();
                        KernelContext.CriticalSection.Enter();
                        if (currentThread.ShallBeTerminated)
                        {
                            break;
                        }
                    }
                }
            }
            KernelContext.CriticalSection.Leave();
            return result;
        }
        /// <summary>
        /// Requests termination of this thread: sets the termination flag (at
        /// most once, via an atomic exchange), unpins it if pinned on the
        /// caller's core, clears pause state, caps its priority, interrupts it
        /// if running, and wakes it from any wait.
        /// </summary>
        /// <returns>The low (run-state) bits of the scheduling flags after the request.</returns>
        public ThreadSchedState PrepareForTermination()
        {
            KernelContext.CriticalSection.Enter();
            if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this)
            {
                Owner.UnpinThread(this);
            }
            ThreadSchedState result;
            // Only the first termination request performs the state teardown.
            if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
            {
                if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
                {
                    // Never started: it can go straight to TerminationPending.
                    SchedFlags = ThreadSchedState.TerminationPending;
                }
                else
                {
                    // Drop any force-pause so the thread can run to its exit.
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;
                        ThreadSchedState oldSchedFlags = SchedFlags;
                        SchedFlags &= ThreadSchedState.LowMask;
                        AdjustScheduling(oldSchedFlags);
                    }
                    // Boost low-priority threads so termination is not starved.
                    if (BasePriority >= 0x10)
                    {
                        SetPriority(0xF);
                    }
                    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
                    {
                        // TODO: GIC distributor stuff (SGIR changes etc).
                        Context.RequestInterrupt();
                    }
                    // Abort any in-progress synchronization wait.
                    SignaledObj = null;
                    ObjSyncResult = KernelResult.ThreadTerminating;
                    ReleaseAndResume();
                }
            }
            result = SchedFlags;
            KernelContext.CriticalSection.Leave();
            return result & ThreadSchedState.LowMask;
        }
  262. public void Terminate()
  263. {
  264. ThreadSchedState state = PrepareForTermination();
  265. if (state != ThreadSchedState.TerminationPending)
  266. {
  267. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  268. }
  269. }
        /// <summary>
        /// Called after a syscall returns: exits the thread if termination was
        /// requested, otherwise re-applies any pending force-pause flags,
        /// looping until the thread is in a stable (non-terminating) state.
        /// </summary>
        public void HandlePostSyscall()
        {
            ThreadSchedState state;
            do
            {
                if (TerminationRequested)
                {
                    Exit();
                    // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
                    break;
                }
                KernelContext.CriticalSection.Enter();
                // Re-check under the critical section: termination may have been
                // requested between the check above and acquiring the lock.
                if (TerminationRequested)
                {
                    state = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        CombineForcePauseFlags();
                    }
                    state = ThreadSchedState.Running;
                }
                KernelContext.CriticalSection.Leave();
            } while (state == ThreadSchedState.TerminationPending);
        }
        /// <summary>
        /// Performs thread exit: releases the owner's thread resource, clears
        /// force-pause state, marks the thread terminated (signaling waiters)
        /// and stops the guest CPU context.
        /// </summary>
        public void Exit()
        {
            // TODO: Debug event.
            if (Owner != null)
            {
                Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);
                _hasBeenReleased = true;
            }
            KernelContext.CriticalSection.Enter();
            // Pauses no longer apply to a dying thread.
            _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
            _forcePausePermissionFlags = 0;
            bool decRef = ExitImpl();
            Context.StopRunning();
            KernelContext.CriticalSection.Leave();
            // Only the first exit drops the reference (see ExitImpl's _hasExited exchange).
            if (decRef)
            {
                DecrementReferenceCount();
            }
        }
        /// <summary>
        /// Moves the thread to TerminationPending and signals it so waiters
        /// (e.g. <see cref="Terminate"/>) wake up.
        /// </summary>
        /// <returns>True exactly once — for the first caller to mark the thread exited.</returns>
        private bool ExitImpl()
        {
            KernelContext.CriticalSection.Enter();
            SetNewSchedFlags(ThreadSchedState.TerminationPending);
            // Atomic exchange guarantees the reference is decremented only once.
            bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;
            Signal();
            KernelContext.CriticalSection.Leave();
            return decRef;
        }
  325. private int GetEffectiveRunningCore()
  326. {
  327. for (int coreNumber = 0; coreNumber < KScheduler.CpuCoresCount; coreNumber++)
  328. {
  329. if (KernelContext.Schedulers[coreNumber].CurrentThread == this)
  330. {
  331. return coreNumber;
  332. }
  333. }
  334. return -1;
  335. }
        /// <summary>
        /// Puts the thread into the Paused state. A positive timeout schedules
        /// a future wake-up; non-positive means sleep until resumed externally.
        /// </summary>
        /// <param name="timeout">Timeout in the time manager's units — TODO confirm unit (ticks vs ns).</param>
        /// <returns><see cref="Result.Success"/>, or <see cref="KernelResult.ThreadTerminating"/> if terminating.</returns>
        public Result Sleep(long timeout)
        {
            KernelContext.CriticalSection.Enter();
            if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
            {
                KernelContext.CriticalSection.Leave();
                return KernelResult.ThreadTerminating;
            }
            SetNewSchedFlags(ThreadSchedState.Paused);
            if (timeout > 0)
            {
                KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
            }
            KernelContext.CriticalSection.Leave();
            // Presumably Leave() only returns once this thread runs again, so the
            // wake-up (if still pending) can be unscheduled here — TODO confirm.
            if (timeout > 0)
            {
                KernelContext.TimeManager.UnscheduleFutureInvocation(this);
            }
            return Result.Success;
        }
  356. public void SetPriority(int priority)
  357. {
  358. KernelContext.CriticalSection.Enter();
  359. if (IsPinned)
  360. {
  361. _originalBasePriority = priority;
  362. }
  363. else
  364. {
  365. BasePriority = priority;
  366. }
  367. UpdatePriorityInheritance();
  368. KernelContext.CriticalSection.Leave();
  369. }
        /// <summary>
        /// Adds a force-pause reason and folds it into the scheduling flags.
        /// Callers in this file hold the kernel critical section when invoking it.
        /// </summary>
        /// <param name="type">Force-pause flag(s) to set.</param>
        public void Suspend(ThreadSchedState type)
        {
            _forcePauseFlags |= type;
            CombineForcePauseFlags();
        }
  375. public void Resume(ThreadSchedState type)
  376. {
  377. ThreadSchedState oldForcePauseFlags = _forcePauseFlags;
  378. _forcePauseFlags &= ~type;
  379. if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
  380. {
  381. ThreadSchedState oldSchedFlags = SchedFlags;
  382. SchedFlags &= ThreadSchedState.LowMask;
  383. AdjustScheduling(oldSchedFlags);
  384. }
  385. }
        /// <summary>
        /// Pauses or unpauses the thread via the thread pause flag. When
        /// pausing, waits until the target has actually stopped running.
        /// </summary>
        /// <param name="pause">True to pause, false to unpause.</param>
        /// <returns>
        /// <see cref="Result.Success"/>, <see cref="KernelResult.InvalidState"/> if the
        /// thread is not in a pausable/resumable state, or
        /// <see cref="KernelResult.ThreadTerminating"/> if the caller is terminating.
        /// </returns>
        public Result SetActivity(bool pause)
        {
            lock (_activityOperationLock)
            {
                Result result = Result.Success;
                KernelContext.CriticalSection.Enter();
                ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
                // Only threads that are Paused or Running can have activity changed.
                if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
                {
                    KernelContext.CriticalSection.Leave();
                    return KernelResult.InvalidState;
                }
                if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
                {
                    if (pause)
                    {
                        // Pause, the force pause flag should be clear (thread is NOT paused).
                        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                        {
                            Suspend(ThreadSchedState.ThreadPauseFlag);
                        }
                        else
                        {
                            result = KernelResult.InvalidState;
                        }
                    }
                    else
                    {
                        // Unpause, the force pause flag should be set (thread is paused).
                        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                        {
                            Resume(ThreadSchedState.ThreadPauseFlag);
                        }
                        else
                        {
                            result = KernelResult.InvalidState;
                        }
                    }
                }
                KernelContext.CriticalSection.Leave();
                // After requesting a pause, spin (through the critical section)
                // until the target thread is no longer running on any core.
                if (result == Result.Success && pause)
                {
                    bool isThreadRunning = true;
                    while (isThreadRunning)
                    {
                        KernelContext.CriticalSection.Enter();
                        if (TerminationRequested)
                        {
                            KernelContext.CriticalSection.Leave();
                            break;
                        }
                        isThreadRunning = false;
                        if (IsPinned)
                        {
                            // Target is pinned: park the calling thread on the
                            // pinned-waiters list instead of busy-waiting.
                            KThread currentThread = KernelStatic.GetCurrentThread();
                            if (currentThread.TerminationRequested)
                            {
                                KernelContext.CriticalSection.Leave();
                                result = KernelResult.ThreadTerminating;
                                break;
                            }
                            _pinnedWaiters.AddLast(currentThread);
                            currentThread.Reschedule(ThreadSchedState.Paused);
                        }
                        else
                        {
                            isThreadRunning = GetEffectiveRunningCore() >= 0;
                        }
                        KernelContext.CriticalSection.Leave();
                    }
                }
                return result;
            }
        }
        /// <summary>
        /// Copies the guest CPU context of this thread. The thread must
        /// currently be paused via the thread pause flag.
        /// </summary>
        /// <param name="context">Receives the context; default when unavailable.</param>
        /// <returns>
        /// <see cref="Result.Success"/> (also when terminating — context stays default),
        /// or <see cref="KernelResult.InvalidState"/> if the thread is not paused.
        /// </returns>
        public Result GetThreadContext3(out ThreadContext context)
        {
            context = default;
            lock (_activityOperationLock)
            {
                KernelContext.CriticalSection.Enter();
                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                {
                    KernelContext.CriticalSection.Leave();
                    return KernelResult.InvalidState;
                }
                if (!TerminationRequested)
                {
                    context = GetCurrentContext();
                }
                KernelContext.CriticalSection.Leave();
            }
            return Result.Success;
        }
  479. private static uint GetPsr(IExecutionContext context)
  480. {
  481. return context.Pstate & 0xFF0FFE20;
  482. }
        /// <summary>
        /// Builds a <see cref="ThreadContext"/> snapshot from the guest CPU
        /// state, using the 64-bit or 32-bit register layout depending on the
        /// owner process flags.
        /// </summary>
        private ThreadContext GetCurrentContext()
        {
            // AArch32 exposes R0-R14 as general registers and 16 FPU registers.
            const int MaxRegistersAArch32 = 15;
            const int MaxFpuRegistersAArch32 = 16;
            ThreadContext context = new ThreadContext();
            if (Owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit))
            {
                for (int i = 0; i < context.Registers.Length; i++)
                {
                    context.Registers[i] = Context.GetX(i);
                }
                for (int i = 0; i < context.FpuRegisters.Length; i++)
                {
                    context.FpuRegisters[i] = Context.GetV(i);
                }
                // Frame pointer, link register and stack pointer by AArch64 convention.
                context.Fp = Context.GetX(29);
                context.Lr = Context.GetX(30);
                context.Sp = Context.GetX(31);
                context.Pc = Context.Pc;
                context.Pstate = GetPsr(Context);
                context.Tpidr = (ulong)Context.TpidrroEl0;
            }
            else
            {
                // 32-bit layout: truncate register values to 32 bits.
                for (int i = 0; i < MaxRegistersAArch32; i++)
                {
                    context.Registers[i] = (uint)Context.GetX(i);
                }
                for (int i = 0; i < MaxFpuRegistersAArch32; i++)
                {
                    context.FpuRegisters[i] = Context.GetV(i);
                }
                context.Pc = (uint)Context.Pc;
                context.Pstate = GetPsr(Context);
                context.Tpidr = (uint)Context.TpidrroEl0;
            }
            context.Fpcr = (uint)Context.Fpcr;
            context.Fpsr = (uint)Context.Fpsr;
            return context;
        }
        /// <summary>
        /// Cancels an in-progress synchronization wait on this thread, or
        /// flags the cancellation for the next wait if none is active.
        /// </summary>
        public void CancelSynchronization()
        {
            KernelContext.CriticalSection.Enter();
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
            {
                // Not currently blocked in a sync wait: remember the cancellation.
                SyncCancelled = true;
            }
            else if (Withholder != null)
            {
                // Parked on a withholder list: remove and wake, keeping the flag
                // so the wait itself observes the cancellation.
                Withholder.Remove(WithholderNode);
                SetNewSchedFlags(ThreadSchedState.Running);
                Withholder = null;
                SyncCancelled = true;
            }
            else
            {
                // Actively waiting: fail the wait with Cancelled and wake it.
                SignaledObj = null;
                ObjSyncResult = KernelResult.Cancelled;
                SetNewSchedFlags(ThreadSchedState.Running);
                SyncCancelled = false;
            }
            KernelContext.CriticalSection.Leave();
        }
        /// <summary>
        /// Sets the thread's preferred core and affinity mask, migrating it off
        /// a now-disallowed core and waiting until it stops running on one.
        /// While core migration is disabled, the values are stashed instead.
        /// </summary>
        /// <param name="newCore">New preferred core; -3 keeps the current one.</param>
        /// <param name="newAffinityMask">Bit mask of allowed cores.</param>
        /// <returns>
        /// <see cref="Result.Success"/>, <see cref="KernelResult.InvalidCombination"/> if the
        /// mask excludes the kept core, or <see cref="KernelResult.ThreadTerminating"/>
        /// if the caller is terminating.
        /// </returns>
        public Result SetCoreAndAffinityMask(int newCore, ulong newAffinityMask)
        {
            lock (_activityOperationLock)
            {
                KernelContext.CriticalSection.Enter();
                bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0;
                // The value -3 is "do not change the preferred core".
                if (newCore == -3)
                {
                    newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore;
                    if ((newAffinityMask & (1UL << newCore)) == 0)
                    {
                        KernelContext.CriticalSection.Leave();
                        return KernelResult.InvalidCombination;
                    }
                }
                if (isCoreMigrationDisabled)
                {
                    // Defer: values take effect when migration is re-enabled.
                    _originalPreferredCore = newCore;
                    _originalAffinityMask = newAffinityMask;
                }
                else
                {
                    ulong oldAffinityMask = AffinityMask;
                    PreferredCore = newCore;
                    AffinityMask = newAffinityMask;
                    if (oldAffinityMask != newAffinityMask)
                    {
                        int oldCore = ActiveCore;
                        // Active core no longer allowed: pick a new one.
                        if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
                        {
                            if (PreferredCore < 0)
                            {
                                // No preferred core: use the highest set bit of the mask.
                                ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount(AffinityMask);
                            }
                            else
                            {
                                ActiveCore = PreferredCore;
                            }
                        }
                        AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
                    }
                }
                KernelContext.CriticalSection.Leave();
                // Wait until the thread is no longer running on a disallowed core.
                bool targetThreadPinned = true;
                while (targetThreadPinned)
                {
                    KernelContext.CriticalSection.Enter();
                    if (TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();
                        break;
                    }
                    targetThreadPinned = false;
                    int coreNumber = GetEffectiveRunningCore();
                    bool isPinnedThreadCurrentlyRunning = coreNumber >= 0;
                    if (isPinnedThreadCurrentlyRunning && ((1UL << coreNumber) & AffinityMask) == 0)
                    {
                        if (IsPinned)
                        {
                            // Pinned target: park the caller instead of spinning.
                            KThread currentThread = KernelStatic.GetCurrentThread();
                            if (currentThread.TerminationRequested)
                            {
                                KernelContext.CriticalSection.Leave();
                                return KernelResult.ThreadTerminating;
                            }
                            _pinnedWaiters.AddLast(currentThread);
                            currentThread.Reschedule(ThreadSchedState.Paused);
                        }
                        else
                        {
                            targetThreadPinned = true;
                        }
                    }
                    KernelContext.CriticalSection.Leave();
                }
                return Result.Success;
            }
        }
  625. private void CombineForcePauseFlags()
  626. {
  627. ThreadSchedState oldFlags = SchedFlags;
  628. ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
  629. SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags);
  630. AdjustScheduling(oldFlags);
  631. }
        /// <summary>
        /// Replaces the low (run-state) bits of <see cref="SchedFlags"/> with
        /// <paramref name="newFlags"/>, keeping the high (pause) bits, and
        /// requeues the thread only if the run state actually changed.
        /// </summary>
        private void SetNewSchedFlags(ThreadSchedState newFlags)
        {
            KernelContext.CriticalSection.Enter();
            ThreadSchedState oldFlags = SchedFlags;
            SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;
            if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
            {
                AdjustScheduling(oldFlags);
            }
            KernelContext.CriticalSection.Leave();
        }
        /// <summary>
        /// Wakes the thread from the Paused state, detaching it from any
        /// withholder list it was parked on.
        /// </summary>
        public void ReleaseAndResume()
        {
            KernelContext.CriticalSection.Enter();
            if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
            {
                if (Withholder != null)
                {
                    Withholder.Remove(WithholderNode);
                    SetNewSchedFlags(ThreadSchedState.Running);
                    Withholder = null;
                }
                else
                {
                    SetNewSchedFlags(ThreadSchedState.Running);
                }
            }
            KernelContext.CriticalSection.Leave();
        }
  661. public void Reschedule(ThreadSchedState newFlags)
  662. {
  663. KernelContext.CriticalSection.Enter();
  664. ThreadSchedState oldFlags = SchedFlags;
  665. SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
  666. (newFlags & ThreadSchedState.LowMask);
  667. AdjustScheduling(oldFlags);
  668. KernelContext.CriticalSection.Leave();
  669. }
        /// <summary>
        /// Registers <paramref name="requester"/> as waiting on a mutex owned
        /// by this thread and propagates priority inheritance.
        /// </summary>
        public void AddMutexWaiter(KThread requester)
        {
            AddToMutexWaitersList(requester);
            requester.MutexOwner = this;
            UpdatePriorityInheritance();
        }
  676. public void RemoveMutexWaiter(KThread thread)
  677. {
  678. if (thread._mutexWaiterNode?.List != null)
  679. {
  680. _mutexWaiters.Remove(thread._mutexWaiterNode);
  681. }
  682. thread.MutexOwner = null;
  683. UpdatePriorityInheritance();
  684. }
        /// <summary>
        /// Releases a mutex this thread owns: picks the highest-priority waiter
        /// on <paramref name="mutexAddress"/> as the new owner and moves the
        /// remaining waiters for that mutex onto the new owner's list.
        /// </summary>
        /// <param name="mutexAddress">Guest address of the mutex being released.</param>
        /// <param name="count">Receives the number of waiters found for this mutex.</param>
        /// <returns>The new owner thread, or null if nobody was waiting.</returns>
        public KThread RelinquishMutex(ulong mutexAddress, out int count)
        {
            count = 0;
            if (_mutexWaiters.First == null)
            {
                return null;
            }
            KThread newMutexOwner = null;
            LinkedListNode<KThread> currentNode = _mutexWaiters.First;
            do
            {
                // Skip all threads that are not waiting for this mutex.
                while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
                {
                    currentNode = currentNode.Next;
                }
                if (currentNode == null)
                {
                    break;
                }
                // Capture the successor before removing the node from the list.
                LinkedListNode<KThread> nextNode = currentNode.Next;
                _mutexWaiters.Remove(currentNode);
                currentNode.Value.MutexOwner = newMutexOwner;
                if (newMutexOwner != null)
                {
                    // New owner was already selected, re-insert on new owner list.
                    newMutexOwner.AddToMutexWaitersList(currentNode.Value);
                }
                else
                {
                    // New owner not selected yet, use current thread.
                    // (List is priority-sorted, so the first match has the highest priority.)
                    newMutexOwner = currentNode.Value;
                }
                count++;
                currentNode = nextNode;
            }
            while (currentNode != null);
            // Both the old and new owners may need their inherited priority updated.
            if (newMutexOwner != null)
            {
                UpdatePriorityInheritance();
                newMutexOwner.UpdatePriorityInheritance();
            }
            return newMutexOwner;
        }
        /// <summary>
        /// Recomputes this thread's dynamic priority from its base priority and
        /// its highest-priority mutex waiter, propagating any change up the
        /// chain of mutex owners.
        /// </summary>
        private void UpdatePriorityInheritance()
        {
            // If any of the threads waiting for the mutex has
            // higher priority than the current thread, then
            // the current thread inherits that priority.
            int highestPriority = BasePriority;
            if (_mutexWaiters.First != null)
            {
                // The waiters list is sorted, so the head has the best (lowest) value.
                int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;
                if (waitingDynamicPriority < highestPriority)
                {
                    highestPriority = waitingDynamicPriority;
                }
            }
            if (highestPriority != DynamicPriority)
            {
                int oldPriority = DynamicPriority;
                DynamicPriority = highestPriority;
                AdjustSchedulingForNewPriority(oldPriority);
                if (MutexOwner != null)
                {
                    // Remove and re-insert to ensure proper sorting based on new priority.
                    MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);
                    MutexOwner.AddToMutexWaitersList(this);
                    // Recurse up the ownership chain so the owner inherits too.
                    MutexOwner.UpdatePriorityInheritance();
                }
            }
        }
  757. private void AddToMutexWaitersList(KThread thread)
  758. {
  759. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  760. int currentPriority = thread.DynamicPriority;
  761. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  762. {
  763. nextPrio = nextPrio.Next;
  764. }
  765. if (nextPrio != null)
  766. {
  767. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  768. }
  769. else
  770. {
  771. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  772. }
  773. }
        /// <summary>
        /// Updates the scheduler's priority queues (or, for unschedulable
        /// threads, the host wait event) after a scheduling-flags change.
        /// </summary>
        /// <param name="oldFlags">The scheduling flags before the change.</param>
        private void AdjustScheduling(ThreadSchedState oldFlags)
        {
            if (oldFlags == SchedFlags)
            {
                return;
            }
            if (!IsSchedulable)
            {
                if (!_forcedUnschedulable)
                {
                    // Ensure our thread is running and we have an event.
                    StartHostThread();
                    // If the thread is not schedulable, we want to just run or pause
                    // it directly as we don't care about priority or the core it is
                    // running on in this case.
                    if (SchedFlags == ThreadSchedState.Running)
                    {
                        _schedulerWaitEvent.Set();
                    }
                    else
                    {
                        _schedulerWaitEvent.Reset();
                    }
                }
                return;
            }
            if (oldFlags == ThreadSchedState.Running)
            {
                // Was running, now it's stopped.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
                }
                // Remove it as a candidate from every other allowed core too.
                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }
            else if (SchedFlags == ThreadSchedState.Running)
            {
                // Was stopped, now it's running.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }
                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }
            KernelContext.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Moves the thread between priority queues after its dynamic priority
        /// changed. No-op unless the thread is running and schedulable.
        /// </summary>
        /// <param name="oldPriority">The dynamic priority before the change.</param>
        private void AdjustSchedulingForNewPriority(int oldPriority)
        {
            if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
            {
                return;
            }
            // Remove thread from the old priority queues.
            if (ActiveCore >= 0)
            {
                KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
            }
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
                }
            }
            // Add thread to the new priority queues.
            KThread currentThread = KernelStatic.GetCurrentThread();
            if (ActiveCore >= 0)
            {
                // The running thread is prepended so it keeps its turn at the new priority.
                if (currentThread == this)
                {
                    KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
                }
                else
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }
            }
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                }
            }
            KernelContext.ThreadReselectionRequested = true;
        }
  872. private void AdjustSchedulingForNewAffinity(ulong oldAffinityMask, int oldCore)
  873. {
  874. if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
  875. {
  876. return;
  877. }
  878. // Remove thread from the old priority queues.
  879. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  880. {
  881. if (((oldAffinityMask >> core) & 1) != 0)
  882. {
  883. if (core == oldCore)
  884. {
  885. KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
  886. }
  887. else
  888. {
  889. KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
  890. }
  891. }
  892. }
  893. // Add thread to the new priority queues.
  894. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  895. {
  896. if (((AffinityMask >> core) & 1) != 0)
  897. {
  898. if (core == ActiveCore)
  899. {
  900. KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
  901. }
  902. else
  903. {
  904. KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
  905. }
  906. }
  907. }
  908. KernelContext.ThreadReselectionRequested = true;
  909. }
  910. public void SetEntryArguments(long argsPtr, int threadHandle)
  911. {
  912. Context.SetX(0, (ulong)argsPtr);
  913. Context.SetX(1, (ulong)threadHandle);
  914. }
  915. public void TimeUp()
  916. {
  917. ReleaseAndResume();
  918. }
  919. public string GetGuestStackTrace()
  920. {
  921. return Owner.Debugger.GetGuestStackTrace(this);
  922. }
  923. public string GetGuestRegisterPrintout()
  924. {
  925. return Owner.Debugger.GetCpuRegisterPrintout(this);
  926. }
  927. public void PrintGuestStackTrace()
  928. {
  929. Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
  930. }
  931. public void PrintGuestRegisterPrintout()
  932. {
  933. Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n");
  934. }
  935. public void AddCpuTime(long ticks)
  936. {
  937. Interlocked.Add(ref _totalTimeRunning, ticks);
  938. }
/// <summary>
/// Lazily creates the scheduler wait event and starts the host thread.
/// Safe to call concurrently: only the caller that wins the
/// Interlocked.Exchange race starts the host thread; a losing caller
/// disposes its now-redundant event.
/// </summary>
public void StartHostThread()
{
    // Cheap pre-check; the Exchange below is what actually decides the race.
    if (_schedulerWaitEvent == null)
    {
        var schedulerWaitEvent = new ManualResetEvent(false);

        // Publish our event atomically; a null previous value means we won
        // and are responsible for starting the host thread exactly once.
        if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
        {
            HostThread.Start();
        }
        else
        {
            // Another caller already installed an event; discard ours.
            schedulerWaitEvent.Dispose();
        }
    }
}
/// <summary>
/// Host thread entry point. Blocks until the scheduler first signals this
/// thread, then runs either the custom HLE start routine or the guest
/// entrypoint, and disposes the CPU context and wait event when done.
/// </summary>
private void ThreadStart()
{
    // Do not run any code until the scheduler actually picks this thread.
    _schedulerWaitEvent.WaitOne();
    KernelStatic.SetKernelContext(KernelContext, this);

    if (_customThreadStart != null)
    {
        _customThreadStart();

        // Ensure that anything trying to join the HLE thread is unblocked.
        Exit();
        HandlePostSyscall();
    }
    else
    {
        // Regular guest thread: execute guest code at the entrypoint.
        Owner.Context.Execute(Context, _entrypoint);
    }

    // Execution finished; release per-thread host resources.
    Context.Dispose();
    _schedulerWaitEvent.Dispose();
}
  972. public void MakeUnschedulable()
  973. {
  974. _forcedUnschedulable = true;
  975. }
  976. public override bool IsSignaled()
  977. {
  978. return _hasExited != 0;
  979. }
  980. protected override void Destroy()
  981. {
  982. if (_hasBeenInitialized)
  983. {
  984. FreeResources();
  985. bool released = Owner != null || _hasBeenReleased;
  986. if (Owner != null)
  987. {
  988. Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);
  989. Owner.DecrementReferenceCount();
  990. }
  991. else
  992. {
  993. KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
  994. }
  995. }
  996. }
/// <summary>
/// Releases the kernel-side resources of this thread: detaches it from its
/// owner, frees its TLS slot, and wakes every thread blocked on a mutex it held.
/// </summary>
private void FreeResources()
{
    Owner?.RemoveThread(this);

    // NOTE(review): Owner is dereferenced unconditionally here, while the line
    // above treats it as possibly null — this assumes a non-zero TLS address
    // implies an owning process; confirm.
    if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != Result.Success)
    {
        throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
    }

    KernelContext.CriticalSection.Enter();

    // Wake up all threads that may be waiting for a mutex being held by this thread.
    // Each waiter is resumed with an InvalidState result since the owner is gone.
    foreach (KThread thread in _mutexWaiters)
    {
        thread.MutexOwner = null;
        thread._originalPreferredCore = 0;
        thread.ObjSyncResult = KernelResult.InvalidState;

        thread.ReleaseAndResume();
    }

    KernelContext.CriticalSection.Leave();

    Owner?.DecrementThreadCountAndTerminateIfZero();
}
/// <summary>
/// Pins this thread to the core it is currently running on, saving the
/// previous core, affinity mask and base priority so <see cref="Unpin"/>
/// can restore them later. Also disallows pausing while pinned.
/// </summary>
public void Pin()
{
    IsPinned = true;
    _coreMigrationDisableCount++;

    int activeCore = ActiveCore;

    // Save current scheduling parameters, then restrict the thread to the current core.
    _originalPreferredCore = PreferredCore;
    _originalAffinityMask = AffinityMask;

    ActiveCore = CurrentCore;
    PreferredCore = CurrentCore;
    AffinityMask = 1UL << CurrentCore;

    // Only touch the priority queues if the core or mask actually changed.
    if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask)
    {
        AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore);
    }

    // Clamp base priority using the owner's allowed priority capability mask
    // (presumably the lowest allowed priority bit minus one — confirm semantics).
    _originalBasePriority = BasePriority;
    BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1);
    UpdatePriorityInheritance();

    // Disallows thread pausing
    _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag;

    CombineForcePauseFlags();

    // TODO: Assign reduced SVC permissions
}
/// <summary>
/// Reverses <see cref="Pin"/>: restores the saved preferred core, affinity
/// mask and base priority, re-enables pausing (unless terminating), and
/// wakes every thread waiting for this thread to become unpinned.
/// </summary>
public void Unpin()
{
    IsPinned = false;
    _coreMigrationDisableCount--;

    ulong affinityMask = AffinityMask;
    int activeCore = ActiveCore;

    PreferredCore = _originalPreferredCore;
    AffinityMask = _originalAffinityMask;

    if (AffinityMask != affinityMask)
    {
        // NOTE(review): this branch runs when the restored mask still contains
        // the current active core — confirm this is the intended condition.
        if ((AffinityMask & 1UL << ActiveCore) != 0)
        {
            if (PreferredCore >= 0)
            {
                ActiveCore = PreferredCore;
            }
            else
            {
                // No preferred core: pick the highest-numbered core set in the mask.
                ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
            }

            AdjustSchedulingForNewAffinity(affinityMask, activeCore);
        }
    }

    BasePriority = _originalBasePriority;
    UpdatePriorityInheritance();

    if (!TerminationRequested)
    {
        // Allows thread pausing
        _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag;

        CombineForcePauseFlags();

        // TODO: Restore SVC permissions
    }

    // Wake up waiters
    foreach (KThread waiter in _pinnedWaiters)
    {
        waiter.ReleaseAndResume();
    }

    _pinnedWaiters.Clear();
}
  1077. public void SynchronizePreemptionState()
  1078. {
  1079. KernelContext.CriticalSection.Enter();
  1080. if (Owner != null && Owner.PinnedThreads[CurrentCore] == this)
  1081. {
  1082. ClearUserInterruptFlag();
  1083. Owner.UnpinThread(this);
  1084. }
  1085. KernelContext.CriticalSection.Leave();
  1086. }
  1087. public ushort GetUserDisableCount()
  1088. {
  1089. return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset);
  1090. }
  1091. public void SetUserInterruptFlag()
  1092. {
  1093. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1);
  1094. }
  1095. public void ClearUserInterruptFlag()
  1096. {
  1097. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0);
  1098. }
  1099. }
  1100. }