KThread.cs 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using Ryujinx.HLE.HOS.Kernel.SupervisorCall;
  6. using System;
  7. using System.Collections.Generic;
  8. using System.Numerics;
  9. using System.Threading;
  10. namespace Ryujinx.HLE.HOS.Kernel.Threading
  11. {
  12. class KThread : KSynchronizationObject, IKFutureSchedulerObject
  13. {
        // Offsets into the user TLS area used for user-mode interrupt/pinning flags.
        private const int TlsUserDisableCountOffset = 0x100;
        private const int TlsUserInterruptFlagOffset = 0x102;

        // Maximum number of objects a single WaitSynchronization call may target.
        public const int MaxWaitSyncObjects = 64;

        // Event used by the scheduler to park/unpark the host thread backing this guest thread.
        private ManualResetEvent _schedulerWaitEvent;
        public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;

        // Host (emulator process) thread that executes this guest thread.
        public Thread HostThread { get; private set; }

        // CPU state of the emulated thread.
        public ARMeilleure.State.ExecutionContext Context { get; private set; }
        public KThreadContext ThreadContext { get; private set; }

        // Effective priority after priority inheritance; BasePriority below is the nominal one.
        public int DynamicPriority { get; set; }
        public ulong AffinityMask { get; set; }

        public ulong ThreadUid { get; private set; }

        private long _totalTimeRunning;
        public long TotalTimeRunning => _totalTimeRunning;

        // Synchronization state: object that signaled us and the condvar/mutex we wait on.
        public KSynchronizationObject SignaledObj { get; set; }
        public ulong CondVarAddress { get; set; }

        private ulong _entrypoint;
        private ThreadStart _customThreadStart;
        private bool _forcedUnschedulable;

        // Threads with a custom (host-provided) start routine bypass the guest scheduler.
        public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;

        public ulong MutexAddress { get; set; }
        public int KernelWaitersCount { get; private set; }

        public KProcess Owner { get; private set; }

        private ulong _tlsAddress;
        public ulong TlsAddress => _tlsAddress;

        // Per-call scratch arrays for WaitSynchronization (fixed to MaxWaitSyncObjects).
        public KSynchronizationObject[] WaitSyncObjects { get; }
        public int[] WaitSyncHandles { get; }

        public long LastScheduledTime { get; set; }

        // One priority-queue node per CPU core.
        public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

        // List currently withholding this thread from scheduling (timeout wait list), if any.
        public LinkedList<KThread> Withholder { get; set; }
        public LinkedListNode<KThread> WithholderNode { get; set; }

        public LinkedListNode<KThread> ProcessListNode { get; set; }

        // Priority-sorted list of threads waiting on a mutex owned by this thread,
        // and this thread's node inside another owner's list.
        private LinkedList<KThread> _mutexWaiters;
        private LinkedListNode<KThread> _mutexWaiterNode;

        // Threads paused while waiting for this (pinned) thread to stop running.
        private LinkedList<KThread> _pinnedWaiters;

        public KThread MutexOwner { get; private set; }

        public int ThreadHandleForUserMutex { get; set; }

        // Pending force-pause request bits, masked by the permission bits before
        // being folded into SchedFlags (see CombineForcePauseFlags).
        private ThreadSchedState _forcePauseFlags;
        private ThreadSchedState _forcePausePermissionFlags;

        public KernelResult ObjSyncResult { get; set; }

        public int BasePriority { get; set; }
        public int PreferredCore { get; set; }

        public int CurrentCore { get; set; }
        public int ActiveCore { get; set; }

        public bool IsPinned { get; private set; }

        // Saved values while the thread is pinned; restored on unpin.
        private ulong _originalAffinityMask;
        private int _originalPreferredCore;
        private int _originalBasePriority;
        private int _coreMigrationDisableCount;

        public ThreadSchedState SchedFlags { get; private set; }

        // int rather than bool so it can be used with Interlocked.CompareExchange.
        private int _shallBeTerminated;
        public bool ShallBeTerminated
        {
            get => _shallBeTerminated != 0;
            set => _shallBeTerminated = value ? 1 : 0;
        }

        public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;

        public bool SyncCancelled { get; set; }
        public bool WaitingSync { get; set; }

        private int _hasExited;
        private bool _hasBeenInitialized;
        private bool _hasBeenReleased;

        public bool WaitingInArbitration { get; set; }

        public long LastPc { get; set; }

        // Serializes SetActivity/SetCoreAndAffinityMask/GetThreadContext3.
        // NOTE(review): could be declared readonly; it is never reassigned here.
        private object ActivityOperationLock = new object();
  78. public KThread(KernelContext context) : base(context)
  79. {
  80. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  81. WaitSyncHandles = new int[MaxWaitSyncObjects];
  82. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  83. _mutexWaiters = new LinkedList<KThread>();
  84. _pinnedWaiters = new LinkedList<KThread>();
  85. }
        /// <summary>
        /// Initializes the thread: scheduling state, TLS (for user threads), CPU
        /// context and host thread. Must be called exactly once before Start.
        /// </summary>
        /// <param name="entrypoint">Guest entrypoint address</param>
        /// <param name="argsPtr">Value placed in X0/R0 for the entrypoint</param>
        /// <param name="stackTop">Initial stack pointer</param>
        /// <param name="priority">Initial base and dynamic priority</param>
        /// <param name="cpuCore">Preferred/initial core</param>
        /// <param name="owner">Owning process, or null for kernel-internal threads</param>
        /// <param name="type">Thread type (must be a valid ThreadType value, 0-3)</param>
        /// <param name="customThreadStart">Optional host routine; makes the thread unschedulable by the guest scheduler</param>
        /// <returns>Success, or OutOfMemory if TLS allocation failed</returns>
        public KernelResult Initialize(
            ulong entrypoint,
            ulong argsPtr,
            ulong stackTop,
            int priority,
            int cpuCore,
            KProcess owner,
            ThreadType type,
            ThreadStart customThreadStart = null)
        {
            if ((uint)type > 3)
            {
                throw new ArgumentException($"Invalid thread type \"{type}\".");
            }

            ThreadContext = new KThreadContext();

            PreferredCore = cpuCore;
            AffinityMask |= 1UL << cpuCore;

            // Dummy threads are considered running from the start; real threads
            // begin with no scheduling state until Start is called.
            SchedFlags = type == ThreadType.Dummy
                ? ThreadSchedState.Running
                : ThreadSchedState.None;

            ActiveCore = cpuCore;
            ObjSyncResult = KernelResult.ThreadNotStarted;
            DynamicPriority = priority;
            BasePriority = priority;
            CurrentCore = cpuCore;
            IsPinned = false;

            _entrypoint = entrypoint;
            _customThreadStart = customThreadStart;

            if (type == ThreadType.User)
            {
                // Note: any allocation failure is reported as OutOfMemory,
                // regardless of the actual error code.
                if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
                {
                    return KernelResult.OutOfMemory;
                }

                MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
            }

            bool is64Bits;

            if (owner != null)
            {
                Owner = owner;

                owner.IncrementReferenceCount();
                owner.IncrementThreadCount();

                is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
            }
            else
            {
                // Ownerless (kernel-internal) threads default to AArch64.
                is64Bits = true;
            }

            HostThread = new Thread(ThreadStart);

            Context = CpuContext.CreateExecutionContext();

            Context.IsAarch32 = !is64Bits;

            // First argument goes in X0 (AArch64) / R0 (AArch32).
            Context.SetX(0, argsPtr);

            if (is64Bits)
            {
                // X18 is seeded with a random, odd value; X31 is used as SP here.
                Context.SetX(18, KSystemControl.GenerateRandom() | 1);
                Context.SetX(31, stackTop);
            }
            else
            {
                // AArch32: R13 is the stack pointer.
                Context.SetX(13, (uint)stackTop);
            }

            // 19.2 MHz counter frequency — presumably matching the target hardware.
            Context.CntfrqEl0 = 19200000;
            Context.Tpidr = (long)_tlsAddress;

            ThreadUid = KernelContext.NewThreadUid();

            HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";

            _hasBeenInitialized = true;

            _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask;

            if (owner != null)
            {
                owner.SubscribeThreadEventHandlers(Context);
                owner.AddThread(this);

                // If the owner process is paused, the new thread starts paused too.
                if (owner.IsPaused)
                {
                    KernelContext.CriticalSection.Enter();

                    if (TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();

                        return KernelResult.Success;
                    }

                    _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;

                    CombineForcePauseFlags();

                    KernelContext.CriticalSection.Leave();
                }
            }

            return KernelResult.Success;
        }
        /// <summary>
        /// Transitions the thread to the running state and starts its host thread.
        /// Retries while the calling thread itself is force-paused, yielding the
        /// critical section between attempts.
        /// </summary>
        /// <returns>
        /// Success when started; InvalidState if the thread was already started;
        /// ThreadTerminating if termination was requested before it could start.
        /// </returns>
        public KernelResult Start()
        {
            // Before the kernel finishes initializing, new threads are held with
            // the kernel-init pause flag so they do not run early.
            if (!KernelContext.KernelInitialized)
            {
                KernelContext.CriticalSection.Enter();

                if (!TerminationRequested)
                {
                    _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;

                    CombineForcePauseFlags();
                }

                KernelContext.CriticalSection.Leave();
            }

            KernelResult result = KernelResult.ThreadTerminating;

            KernelContext.CriticalSection.Enter();

            if (!ShallBeTerminated)
            {
                // currentThread may be null when called from a non-guest (host) thread.
                KThread currentThread = KernelStatic.GetCurrentThread();

                while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
                {
                    // A non-None low nibble means the thread was already started.
                    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
                    {
                        result = KernelResult.InvalidState;

                        break;
                    }

                    if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
                    {
                        // Apply any pending pause state to the new thread before it runs.
                        if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                        {
                            CombineForcePauseFlags();
                        }

                        SetNewSchedFlags(ThreadSchedState.Running);

                        StartHostThread();

                        result = KernelResult.Success;

                        break;
                    }
                    else
                    {
                        // The caller is force-paused: honor the pause by leaving and
                        // re-entering the critical section (this reschedules), then retry.
                        currentThread.CombineForcePauseFlags();

                        KernelContext.CriticalSection.Leave();
                        KernelContext.CriticalSection.Enter();

                        if (currentThread.ShallBeTerminated)
                        {
                            break;
                        }
                    }
                }
            }

            KernelContext.CriticalSection.Leave();

            return result;
        }
        /// <summary>
        /// Marks the thread for termination, waking it from any wait and removing
        /// pause state. Only the first caller performs the transition (guarded by
        /// an atomic swap of the termination flag).
        /// </summary>
        /// <returns>The low (run state) nibble of the scheduling flags after the request</returns>
        public ThreadSchedState PrepareForTermination()
        {
            KernelContext.CriticalSection.Enter();

            // If this thread is pinned on the caller's core, unpin it first.
            if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this)
            {
                Owner.UnpinThread(this);
            }

            ThreadSchedState result;

            // Atomically claim the termination request; later callers skip the body.
            if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
            {
                if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
                {
                    // Never started: go straight to termination pending.
                    SchedFlags = ThreadSchedState.TerminationPending;
                }
                else
                {
                    // Drop any force-pause so the thread can run to its exit path.
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                        ThreadSchedState oldSchedFlags = SchedFlags;

                        SchedFlags &= ThreadSchedState.LowMask;

                        AdjustScheduling(oldSchedFlags);
                    }

                    // Boost priority so the terminating thread gets scheduled promptly.
                    if (BasePriority >= 0x10)
                    {
                        SetPriority(0xF);
                    }

                    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
                    {
                        // TODO: GIC distributor stuffs (sgir changes ect)
                        Context.RequestInterrupt();
                    }

                    // Wake the thread from any synchronization wait with a
                    // terminating result.
                    SignaledObj = null;
                    ObjSyncResult = KernelResult.ThreadTerminating;

                    ReleaseAndResume();
                }
            }

            result = SchedFlags;

            KernelContext.CriticalSection.Leave();

            return result & ThreadSchedState.LowMask;
        }
  263. public void Terminate()
  264. {
  265. ThreadSchedState state = PrepareForTermination();
  266. if (state != ThreadSchedState.TerminationPending)
  267. {
  268. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  269. }
  270. }
        /// <summary>
        /// Runs after each syscall: exits the thread if termination was requested,
        /// otherwise re-applies any pending force-pause state.
        /// </summary>
        public void HandlePostSyscall()
        {
            ThreadSchedState state;

            do
            {
                if (TerminationRequested)
                {
                    Exit();

                    // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
                    break;
                }

                KernelContext.CriticalSection.Enter();

                // Re-check under the critical section: termination may have been
                // requested between the check above and entering the section.
                if (TerminationRequested)
                {
                    state = ThreadSchedState.TerminationPending;
                }
                else
                {
                    if (_forcePauseFlags != ThreadSchedState.None)
                    {
                        CombineForcePauseFlags();
                    }

                    state = ThreadSchedState.Running;
                }

                KernelContext.CriticalSection.Leave();
            } while (state == ThreadSchedState.TerminationPending);
        }
        /// <summary>
        /// Performs thread exit: releases the owner's thread resource, clears
        /// pause state, signals waiters and stops the CPU context.
        /// </summary>
        public void Exit()
        {
            // TODO: Debug event.

            if (Owner != null)
            {
                Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);

                _hasBeenReleased = true;
            }

            KernelContext.CriticalSection.Enter();

            _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
            _forcePausePermissionFlags = 0;

            bool decRef = ExitImpl();

            Context.StopRunning();

            KernelContext.CriticalSection.Leave();

            // Only the first exit drops the reference (see ExitImpl).
            if (decRef)
            {
                DecrementReferenceCount();
            }
        }
        // Marks the thread as termination pending and signals anyone waiting on it.
        // Returns true only for the first call (atomic exchange on _hasExited),
        // so the reference count is decremented exactly once.
        private bool ExitImpl()
        {
            KernelContext.CriticalSection.Enter();

            SetNewSchedFlags(ThreadSchedState.TerminationPending);

            bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;

            // Wake threads waiting on this thread as a synchronization object.
            Signal();

            KernelContext.CriticalSection.Leave();

            return decRef;
        }
  326. private int GetEffectiveRunningCore()
  327. {
  328. for (int coreNumber = 0; coreNumber < KScheduler.CpuCoresCount; coreNumber++)
  329. {
  330. if (KernelContext.Schedulers[coreNumber].CurrentThread == this)
  331. {
  332. return coreNumber;
  333. }
  334. }
  335. return -1;
  336. }
  337. public KernelResult Sleep(long timeout)
  338. {
  339. KernelContext.CriticalSection.Enter();
  340. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  341. {
  342. KernelContext.CriticalSection.Leave();
  343. return KernelResult.ThreadTerminating;
  344. }
  345. SetNewSchedFlags(ThreadSchedState.Paused);
  346. if (timeout > 0)
  347. {
  348. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  349. }
  350. KernelContext.CriticalSection.Leave();
  351. if (timeout > 0)
  352. {
  353. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  354. }
  355. return 0;
  356. }
  357. public void SetPriority(int priority)
  358. {
  359. KernelContext.CriticalSection.Enter();
  360. if (IsPinned)
  361. {
  362. _originalBasePriority = priority;
  363. }
  364. else
  365. {
  366. BasePriority = priority;
  367. }
  368. UpdatePriorityInheritance();
  369. KernelContext.CriticalSection.Leave();
  370. }
        /// <summary>
        /// Sets a force-pause flag and folds it into the scheduling flags.
        /// Callers in this file invoke it while holding the kernel critical section.
        /// </summary>
        /// <param name="type">Force-pause flag(s) to set</param>
        public void Suspend(ThreadSchedState type)
        {
            _forcePauseFlags |= type;

            CombineForcePauseFlags();
        }
        /// <summary>
        /// Clears a force-pause flag; if no other force-pause flags remain, the
        /// high bits of the scheduling flags are cleared so the thread can run.
        /// </summary>
        /// <param name="type">Force-pause flag(s) to clear</param>
        public void Resume(ThreadSchedState type)
        {
            ThreadSchedState oldForcePauseFlags = _forcePauseFlags;

            _forcePauseFlags &= ~type;

            // Only resume when the cleared flag was the last remaining pause reason.
            if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
            {
                ThreadSchedState oldSchedFlags = SchedFlags;

                SchedFlags &= ThreadSchedState.LowMask;

                AdjustScheduling(oldSchedFlags);
            }
        }
        /// <summary>
        /// Pauses or unpauses the thread via the thread-pause force flag. When
        /// pausing, waits until the target thread actually stops running.
        /// </summary>
        /// <param name="pause">True to pause, false to unpause</param>
        /// <returns>
        /// Success; InvalidState if the thread is in a non-pausable state or the
        /// flag is already in the requested state; ThreadTerminating if the
        /// calling thread is terminating while waiting on a pinned target.
        /// </returns>
        public KernelResult SetActivity(bool pause)
        {
            lock (ActivityOperationLock)
            {
                KernelResult result = KernelResult.Success;

                KernelContext.CriticalSection.Enter();

                ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

                // Only running or paused threads can change activity.
                if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
                {
                    KernelContext.CriticalSection.Leave();

                    return KernelResult.InvalidState;
                }

                if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
                {
                    if (pause)
                    {
                        // Pause, the force pause flag should be clear (thread is NOT paused).
                        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                        {
                            Suspend(ThreadSchedState.ThreadPauseFlag);
                        }
                        else
                        {
                            result = KernelResult.InvalidState;
                        }
                    }
                    else
                    {
                        // Unpause, the force pause flag should be set (thread is paused).
                        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                        {
                            Resume(ThreadSchedState.ThreadPauseFlag);
                        }
                        else
                        {
                            result = KernelResult.InvalidState;
                        }
                    }
                }

                KernelContext.CriticalSection.Leave();

                // After a successful pause request, wait for the target to
                // actually stop running on any core.
                if (result == KernelResult.Success && pause)
                {
                    bool isThreadRunning = true;

                    while (isThreadRunning)
                    {
                        KernelContext.CriticalSection.Enter();

                        if (TerminationRequested)
                        {
                            KernelContext.CriticalSection.Leave();

                            break;
                        }

                        isThreadRunning = false;

                        if (IsPinned)
                        {
                            // Pinned target: park the caller on the pinned-waiters
                            // list instead of spinning.
                            KThread currentThread = KernelStatic.GetCurrentThread();

                            if (currentThread.TerminationRequested)
                            {
                                KernelContext.CriticalSection.Leave();

                                result = KernelResult.ThreadTerminating;

                                break;
                            }

                            _pinnedWaiters.AddLast(currentThread);

                            currentThread.Reschedule(ThreadSchedState.Paused);
                        }
                        else
                        {
                            // Re-check on the next iteration if still on a core.
                            isThreadRunning = GetEffectiveRunningCore() >= 0;
                        }

                        KernelContext.CriticalSection.Leave();
                    }
                }

                return result;
            }
        }
        /// <summary>
        /// Copies the thread's CPU context out, for the svcGetThreadContext3
        /// syscall. The thread must currently be force-paused.
        /// </summary>
        /// <param name="context">Receives the context; default-initialized on failure</param>
        /// <returns>InvalidState if the thread is not paused, otherwise Success</returns>
        public KernelResult GetThreadContext3(out ThreadContext context)
        {
            context = default;

            lock (ActivityOperationLock)
            {
                KernelContext.CriticalSection.Enter();

                // The context can only be read safely while the thread is paused.
                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                {
                    KernelContext.CriticalSection.Leave();

                    return KernelResult.InvalidState;
                }

                // A terminating thread leaves the default context (still Success).
                if (!TerminationRequested)
                {
                    context = GetCurrentContext();
                }

                KernelContext.CriticalSection.Leave();
            }

            return KernelResult.Success;
        }
        // Returns the processor state register with non-exposed bits masked off.
        // NOTE(review): mask 0xFF0FFE20 presumably mirrors what the official
        // kernel exposes to userland — confirm against kernel docs.
        private static uint GetPsr(ARMeilleure.State.ExecutionContext context)
        {
            return context.Pstate & 0xFF0FFE20;
        }
        // Builds a ThreadContext snapshot from the emulated CPU state, using the
        // AArch64 or AArch32 register layout depending on the owner process flags.
        private ThreadContext GetCurrentContext()
        {
            // AArch32 exposes R0-R14 and 16 FPU registers.
            const int MaxRegistersAArch32 = 15;
            const int MaxFpuRegistersAArch32 = 16;

            ThreadContext context = new ThreadContext();

            if (Owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit))
            {
                for (int i = 0; i < context.Registers.Length; i++)
                {
                    context.Registers[i] = Context.GetX(i);
                }

                for (int i = 0; i < context.FpuRegisters.Length; i++)
                {
                    context.FpuRegisters[i] = Context.GetV(i);
                }

                // X29 = frame pointer, X30 = link register, index 31 = stack pointer.
                context.Fp = Context.GetX(29);
                context.Lr = Context.GetX(30);
                context.Sp = Context.GetX(31);
                // PC comes from the last recorded guest PC, not the live context.
                context.Pc = (ulong)LastPc;
                context.Pstate = GetPsr(Context);
                context.Tpidr = (ulong)Context.Tpidr;
            }
            else
            {
                for (int i = 0; i < MaxRegistersAArch32; i++)
                {
                    context.Registers[i] = (uint)Context.GetX(i);
                }

                for (int i = 0; i < MaxFpuRegistersAArch32; i++)
                {
                    context.FpuRegisters[i] = Context.GetV(i);
                }

                context.Pc = (uint)LastPc;
                context.Pstate = GetPsr(Context);
                context.Tpidr = (uint)Context.Tpidr;
            }

            context.Fpcr = (uint)Context.Fpcr;
            context.Fpsr = (uint)Context.Fpsr;

            return context;
        }
        /// <summary>
        /// Cancels an in-progress synchronization wait: either wakes the thread
        /// with a Cancelled result, or flags the cancellation for the next wait.
        /// </summary>
        public void CancelSynchronization()
        {
            KernelContext.CriticalSection.Enter();

            // Not currently blocked in a sync wait: remember the cancellation so
            // the next wait fails immediately.
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
            {
                SyncCancelled = true;
            }
            else if (Withholder != null)
            {
                // Waiting with a timeout: detach from the withholder list and resume.
                Withholder.Remove(WithholderNode);

                SetNewSchedFlags(ThreadSchedState.Running);

                Withholder = null;

                SyncCancelled = true;
            }
            else
            {
                // Actively waiting: wake it up with a Cancelled sync result.
                SignaledObj = null;
                ObjSyncResult = KernelResult.Cancelled;

                SetNewSchedFlags(ThreadSchedState.Running);

                SyncCancelled = false;
            }

            KernelContext.CriticalSection.Leave();
        }
        /// <summary>
        /// Changes the preferred core and affinity mask, migrating the thread off
        /// a now-disallowed core and waiting until it stops running there.
        /// </summary>
        /// <param name="newCore">New preferred core, or -3 to keep the current one</param>
        /// <param name="newAffinityMask">New core affinity bitmask</param>
        /// <returns>
        /// Success; InvalidCombination if the kept core is not in the new mask;
        /// ThreadTerminating if the caller terminates while waiting on a pinned target.
        /// </returns>
        public KernelResult SetCoreAndAffinityMask(int newCore, ulong newAffinityMask)
        {
            lock (ActivityOperationLock)
            {
                KernelContext.CriticalSection.Enter();

                bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0;

                // The value -3 is "do not change the preferred core".
                if (newCore == -3)
                {
                    newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore;

                    if ((newAffinityMask & (1UL << newCore)) == 0)
                    {
                        KernelContext.CriticalSection.Leave();

                        return KernelResult.InvalidCombination;
                    }
                }

                if (isCoreMigrationDisabled)
                {
                    // While migration is disabled, only the stashed values change;
                    // they are applied when migration is re-enabled.
                    _originalPreferredCore = newCore;
                    _originalAffinityMask = newAffinityMask;
                }
                else
                {
                    ulong oldAffinityMask = AffinityMask;

                    PreferredCore = newCore;
                    AffinityMask = newAffinityMask;

                    if (oldAffinityMask != newAffinityMask)
                    {
                        int oldCore = ActiveCore;

                        // The active core is no longer allowed: pick a new one.
                        if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
                        {
                            if (PreferredCore < 0)
                            {
                                // No preferred core: use the highest set bit of the mask.
                                ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount(AffinityMask);
                            }
                            else
                            {
                                ActiveCore = PreferredCore;
                            }
                        }

                        AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
                    }
                }

                KernelContext.CriticalSection.Leave();

                // Wait until the thread is no longer running on a core outside
                // the new affinity mask.
                bool targetThreadPinned = true;

                while (targetThreadPinned)
                {
                    KernelContext.CriticalSection.Enter();

                    if (TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();

                        break;
                    }

                    targetThreadPinned = false;

                    int coreNumber = GetEffectiveRunningCore();

                    bool isPinnedThreadCurrentlyRunning = coreNumber >= 0;

                    if (isPinnedThreadCurrentlyRunning && ((1UL << coreNumber) & AffinityMask) == 0)
                    {
                        if (IsPinned)
                        {
                            // Pinned target: park the caller instead of spinning.
                            KThread currentThread = KernelStatic.GetCurrentThread();

                            if (currentThread.TerminationRequested)
                            {
                                KernelContext.CriticalSection.Leave();

                                return KernelResult.ThreadTerminating;
                            }

                            _pinnedWaiters.AddLast(currentThread);

                            currentThread.Reschedule(ThreadSchedState.Paused);
                        }
                        else
                        {
                            targetThreadPinned = true;
                        }
                    }

                    KernelContext.CriticalSection.Leave();
                }

                return KernelResult.Success;
            }
        }
  626. private void CombineForcePauseFlags()
  627. {
  628. ThreadSchedState oldFlags = SchedFlags;
  629. ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
  630. SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags);
  631. AdjustScheduling(oldFlags);
  632. }
  633. private void SetNewSchedFlags(ThreadSchedState newFlags)
  634. {
  635. KernelContext.CriticalSection.Enter();
  636. ThreadSchedState oldFlags = SchedFlags;
  637. SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;
  638. if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
  639. {
  640. AdjustScheduling(oldFlags);
  641. }
  642. KernelContext.CriticalSection.Leave();
  643. }
  644. public void ReleaseAndResume()
  645. {
  646. KernelContext.CriticalSection.Enter();
  647. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
  648. {
  649. if (Withholder != null)
  650. {
  651. Withholder.Remove(WithholderNode);
  652. SetNewSchedFlags(ThreadSchedState.Running);
  653. Withholder = null;
  654. }
  655. else
  656. {
  657. SetNewSchedFlags(ThreadSchedState.Running);
  658. }
  659. }
  660. KernelContext.CriticalSection.Leave();
  661. }
  662. public void Reschedule(ThreadSchedState newFlags)
  663. {
  664. KernelContext.CriticalSection.Enter();
  665. ThreadSchedState oldFlags = SchedFlags;
  666. SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
  667. (newFlags & ThreadSchedState.LowMask);
  668. AdjustScheduling(oldFlags);
  669. KernelContext.CriticalSection.Leave();
  670. }
        /// <summary>
        /// Registers a thread as waiting on a mutex owned by this thread and
        /// propagates priority inheritance to this (owner) thread.
        /// </summary>
        /// <param name="requester">Thread that wants the mutex</param>
        public void AddMutexWaiter(KThread requester)
        {
            AddToMutexWaitersList(requester);

            requester.MutexOwner = this;

            UpdatePriorityInheritance();
        }
  677. public void RemoveMutexWaiter(KThread thread)
  678. {
  679. if (thread._mutexWaiterNode?.List != null)
  680. {
  681. _mutexWaiters.Remove(thread._mutexWaiterNode);
  682. }
  683. thread.MutexOwner = null;
  684. UpdatePriorityInheritance();
  685. }
        /// <summary>
        /// Releases a mutex this thread owns: the highest-priority waiter on that
        /// address becomes the new owner, and the remaining waiters on the same
        /// address are transferred to the new owner's list.
        /// </summary>
        /// <param name="mutexAddress">Guest address of the mutex being released</param>
        /// <param name="count">Receives the number of threads that were waiting on the address</param>
        /// <returns>The new mutex owner, or null if no thread was waiting</returns>
        public KThread RelinquishMutex(ulong mutexAddress, out int count)
        {
            count = 0;

            if (_mutexWaiters.First == null)
            {
                return null;
            }

            KThread newMutexOwner = null;

            LinkedListNode<KThread> currentNode = _mutexWaiters.First;

            do
            {
                // Skip all threads that are not waiting for this mutex.
                while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
                {
                    currentNode = currentNode.Next;
                }

                if (currentNode == null)
                {
                    break;
                }

                // Capture the successor before detaching the node.
                LinkedListNode<KThread> nextNode = currentNode.Next;

                _mutexWaiters.Remove(currentNode);

                currentNode.Value.MutexOwner = newMutexOwner;

                if (newMutexOwner != null)
                {
                    // New owner was already selected, re-insert on new owner list.
                    newMutexOwner.AddToMutexWaitersList(currentNode.Value);
                }
                else
                {
                    // New owner not selected yet, use current thread.
                    newMutexOwner = currentNode.Value;
                }

                count++;

                currentNode = nextNode;
            }
            while (currentNode != null);

            if (newMutexOwner != null)
            {
                // Recompute inherited priorities for both old and new owner.
                UpdatePriorityInheritance();

                newMutexOwner.UpdatePriorityInheritance();
            }

            return newMutexOwner;
        }
        // Recomputes this thread's dynamic priority from its base priority and
        // the best-priority mutex waiter, propagating the change up the chain of
        // mutex owners. Lower numeric value = higher priority.
        private void UpdatePriorityInheritance()
        {
            // If any of the threads waiting for the mutex has
            // higher priority than the current thread, then
            // the current thread inherits that priority.
            int highestPriority = BasePriority;

            if (_mutexWaiters.First != null)
            {
                // The waiters list is priority-sorted, so the head is the best.
                int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;

                if (waitingDynamicPriority < highestPriority)
                {
                    highestPriority = waitingDynamicPriority;
                }
            }

            if (highestPriority != DynamicPriority)
            {
                int oldPriority = DynamicPriority;

                DynamicPriority = highestPriority;

                AdjustSchedulingForNewPriority(oldPriority);

                if (MutexOwner != null)
                {
                    // Remove and re-insert to ensure proper sorting based on new priority.
                    MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);

                    MutexOwner.AddToMutexWaitersList(this);

                    // Recurse up the ownership chain so the owner inherits too.
                    MutexOwner.UpdatePriorityInheritance();
                }
            }
        }
  758. private void AddToMutexWaitersList(KThread thread)
  759. {
  760. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  761. int currentPriority = thread.DynamicPriority;
  762. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  763. {
  764. nextPrio = nextPrio.Next;
  765. }
  766. if (nextPrio != null)
  767. {
  768. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  769. }
  770. else
  771. {
  772. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  773. }
  774. }
        // Synchronizes the scheduler priority queues (and, for unschedulable
        // threads, the host wait event) with a change in SchedFlags.
        private void AdjustScheduling(ThreadSchedState oldFlags)
        {
            // No state change, nothing to do.
            if (oldFlags == SchedFlags)
            {
                return;
            }

            if (!IsSchedulable)
            {
                if (!_forcedUnschedulable)
                {
                    // Ensure our thread is running and we have an event.
                    StartHostThread();

                    // If the thread is not schedulable, we want to just run or pause
                    // it directly as we don't care about priority or the core it is
                    // running on in this case.
                    if (SchedFlags == ThreadSchedState.Running)
                    {
                        _schedulerWaitEvent.Set();
                    }
                    else
                    {
                        _schedulerWaitEvent.Reset();
                    }
                }

                return;
            }

            if (oldFlags == ThreadSchedState.Running)
            {
                // Was running, now it's stopped.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
                }

                // Also drop the thread from the "suggested" queues of the other
                // cores in its affinity mask.
                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }
            else if (SchedFlags == ThreadSchedState.Running)
            {
                // Was stopped, now it's running.
                if (ActiveCore >= 0)
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }

                for (int core = 0; core < KScheduler.CpuCoresCount; core++)
                {
                    if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            // Ask the scheduler to re-pick threads on the next opportunity.
            KernelContext.ThreadReselectionRequested = true;
        }
        // Moves the thread between priority queues after a dynamic priority
        // change. Only relevant for schedulable, currently-running threads.
        private void AdjustSchedulingForNewPriority(int oldPriority)
        {
            if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            if (ActiveCore >= 0)
            {
                KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
                }
            }

            // Add thread to the new priority queues.
            KThread currentThread = KernelStatic.GetCurrentThread();

            if (ActiveCore >= 0)
            {
                // The current thread is put at the front of its new queue so it
                // keeps running; other threads go to the back.
                if (currentThread == this)
                {
                    KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
                }
                else
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                }
            }

            KernelContext.ThreadReselectionRequested = true;
        }
  873. private void AdjustSchedulingForNewAffinity(ulong oldAffinityMask, int oldCore)
  874. {
  875. if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
  876. {
  877. return;
  878. }
  879. // Remove thread from the old priority queues.
  880. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  881. {
  882. if (((oldAffinityMask >> core) & 1) != 0)
  883. {
  884. if (core == oldCore)
  885. {
  886. KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
  887. }
  888. else
  889. {
  890. KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
  891. }
  892. }
  893. }
  894. // Add thread to the new priority queues.
  895. for (int core = 0; core < KScheduler.CpuCoresCount; core++)
  896. {
  897. if (((AffinityMask >> core) & 1) != 0)
  898. {
  899. if (core == ActiveCore)
  900. {
  901. KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
  902. }
  903. else
  904. {
  905. KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
  906. }
  907. }
  908. }
  909. KernelContext.ThreadReselectionRequested = true;
  910. }
  911. public void SetEntryArguments(long argsPtr, int threadHandle)
  912. {
  913. Context.SetX(0, (ulong)argsPtr);
  914. Context.SetX(1, (ulong)threadHandle);
  915. }
  916. public void TimeUp()
  917. {
  918. ReleaseAndResume();
  919. }
  920. public string GetGuestStackTrace()
  921. {
  922. return Owner.Debugger.GetGuestStackTrace(this);
  923. }
  924. public string GetGuestRegisterPrintout()
  925. {
  926. return Owner.Debugger.GetCpuRegisterPrintout(this);
  927. }
  928. public void PrintGuestStackTrace()
  929. {
  930. Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
  931. }
  932. public void PrintGuestRegisterPrintout()
  933. {
  934. Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n");
  935. }
  936. public void AddCpuTime(long ticks)
  937. {
  938. Interlocked.Add(ref _totalTimeRunning, ticks);
  939. }
  940. public void StartHostThread()
  941. {
  942. if (_schedulerWaitEvent == null)
  943. {
  944. var schedulerWaitEvent = new ManualResetEvent(false);
  945. if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
  946. {
  947. HostThread.Start();
  948. }
  949. else
  950. {
  951. schedulerWaitEvent.Dispose();
  952. }
  953. }
  954. }
  955. private void ThreadStart()
  956. {
  957. _schedulerWaitEvent.WaitOne();
  958. KernelStatic.SetKernelContext(KernelContext, this);
  959. if (_customThreadStart != null)
  960. {
  961. _customThreadStart();
  962. }
  963. else
  964. {
  965. Owner.Context.Execute(Context, _entrypoint);
  966. }
  967. Context.Dispose();
  968. _schedulerWaitEvent.Dispose();
  969. }
  970. public void MakeUnschedulable()
  971. {
  972. _forcedUnschedulable = true;
  973. }
  974. public override bool IsSignaled()
  975. {
  976. return _hasExited != 0;
  977. }
  978. protected override void Destroy()
  979. {
  980. if (_hasBeenInitialized)
  981. {
  982. FreeResources();
  983. bool released = Owner != null || _hasBeenReleased;
  984. if (Owner != null)
  985. {
  986. Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);
  987. Owner.DecrementReferenceCount();
  988. }
  989. else
  990. {
  991. KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
  992. }
  993. }
  994. }
  995. private void FreeResources()
  996. {
  997. Owner?.RemoveThread(this);
  998. if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
  999. {
  1000. throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
  1001. }
  1002. KernelContext.CriticalSection.Enter();
  1003. // Wake up all threads that may be waiting for a mutex being held by this thread.
  1004. foreach (KThread thread in _mutexWaiters)
  1005. {
  1006. thread.MutexOwner = null;
  1007. thread._originalPreferredCore = 0;
  1008. thread.ObjSyncResult = KernelResult.InvalidState;
  1009. thread.ReleaseAndResume();
  1010. }
  1011. KernelContext.CriticalSection.Leave();
  1012. Owner?.DecrementThreadCountAndTerminateIfZero();
  1013. }
  1014. public void Pin()
  1015. {
  1016. IsPinned = true;
  1017. _coreMigrationDisableCount++;
  1018. int activeCore = ActiveCore;
  1019. _originalPreferredCore = PreferredCore;
  1020. _originalAffinityMask = AffinityMask;
  1021. ActiveCore = CurrentCore;
  1022. PreferredCore = CurrentCore;
  1023. AffinityMask = 1UL << CurrentCore;
  1024. if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask)
  1025. {
  1026. AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore);
  1027. }
  1028. _originalBasePriority = BasePriority;
  1029. BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1);
  1030. UpdatePriorityInheritance();
  1031. // Disallows thread pausing
  1032. _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag;
  1033. CombineForcePauseFlags();
  1034. // TODO: Assign reduced SVC permissions
  1035. }
  1036. public void Unpin()
  1037. {
  1038. IsPinned = false;
  1039. _coreMigrationDisableCount--;
  1040. ulong affinityMask = AffinityMask;
  1041. int activeCore = ActiveCore;
  1042. PreferredCore = _originalPreferredCore;
  1043. AffinityMask = _originalAffinityMask;
  1044. if (AffinityMask != affinityMask)
  1045. {
  1046. if ((AffinityMask & 1UL << ActiveCore) != 0)
  1047. {
  1048. if (PreferredCore >= 0)
  1049. {
  1050. ActiveCore = PreferredCore;
  1051. }
  1052. else
  1053. {
  1054. ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
  1055. }
  1056. AdjustSchedulingForNewAffinity(affinityMask, activeCore);
  1057. }
  1058. }
  1059. BasePriority = _originalBasePriority;
  1060. UpdatePriorityInheritance();
  1061. if (!TerminationRequested)
  1062. {
  1063. // Allows thread pausing
  1064. _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag;
  1065. CombineForcePauseFlags();
  1066. // TODO: Restore SVC permissions
  1067. }
  1068. // Wake up waiters
  1069. foreach (KThread waiter in _pinnedWaiters)
  1070. {
  1071. waiter.ReleaseAndResume();
  1072. }
  1073. _pinnedWaiters.Clear();
  1074. }
  1075. public void SynchronizePreemptionState()
  1076. {
  1077. KernelContext.CriticalSection.Enter();
  1078. if (Owner != null && Owner.PinnedThreads[CurrentCore] == this)
  1079. {
  1080. ClearUserInterruptFlag();
  1081. Owner.UnpinThread(this);
  1082. }
  1083. KernelContext.CriticalSection.Leave();
  1084. }
  1085. public ushort GetUserDisableCount()
  1086. {
  1087. return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset);
  1088. }
  1089. public void SetUserInterruptFlag()
  1090. {
  1091. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1);
  1092. }
  1093. public void ClearUserInterruptFlag()
  1094. {
  1095. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0);
  1096. }
  1097. }
  1098. }