KThread.cs 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using Ryujinx.HLE.HOS.Kernel.SupervisorCall;
  6. using System;
  7. using System.Collections.Generic;
  8. using System.Numerics;
  9. using System.Threading;
  10. namespace Ryujinx.HLE.HOS.Kernel.Threading
  11. {
/// <summary>
/// HLE kernel thread. Wraps a host <see cref="Thread"/> plus a guest CPU execution
/// context, and carries all scheduler / synchronization state for that thread.
/// </summary>
class KThread : KSynchronizationObject, IKFutureSchedulerObject
{
    // Offsets into the thread's TLS entry used by user-mode interrupt handling.
    private const int TlsUserDisableCountOffset = 0x100;
    private const int TlsUserInterruptFlagOffset = 0x102;

    // Maximum number of objects a single WaitSynchronization call may target.
    public const int MaxWaitSyncObjects = 64;

    // Event the host scheduler uses to park/unpark this thread.
    private ManualResetEvent _schedulerWaitEvent;
    public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;

    public Thread HostThread { get; private set; }

    // Emulated CPU state for the guest thread.
    public ARMeilleure.State.ExecutionContext Context { get; private set; }
    public KThreadContext ThreadContext { get; private set; }

    // Effective priority (base priority possibly boosted by priority inheritance).
    public int DynamicPriority { get; set; }
    public ulong AffinityMask { get; set; }
    public long ThreadUid { get; private set; }

    private long _totalTimeRunning;
    public long TotalTimeRunning => _totalTimeRunning;

    // Object that satisfied the last synchronization wait, if any.
    public KSynchronizationObject SignaledObj { get; set; }

    // Addresses used by the condition-variable / mutex arbitration SVCs.
    public ulong CondVarAddress { get; set; }
    private ulong _entrypoint;
    private ThreadStart _customThreadStart;
    private bool _forcedUnschedulable;

    // Threads with a custom host start routine (or forced unschedulable) bypass
    // the guest priority scheduler entirely.
    public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;
    public ulong MutexAddress { get; set; }
    public int KernelWaitersCount { get; private set; }

    public KProcess Owner { get; private set; }

    private ulong _tlsAddress;
    public ulong TlsAddress => _tlsAddress;

    // Parallel arrays holding the objects/handles of an in-flight multi-wait.
    public KSynchronizationObject[] WaitSyncObjects { get; }
    public int[] WaitSyncHandles { get; }

    public long LastScheduledTime { get; set; }

    // Per-core nodes used by the priority queue's suggested/scheduled lists.
    public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

    // List currently "withholding" this thread from scheduling (e.g. a wait list),
    // and our node inside it.
    public LinkedList<KThread> Withholder { get; set; }
    public LinkedListNode<KThread> WithholderNode { get; set; }

    public LinkedListNode<KThread> ProcessListNode { get; set; }

    // Threads blocked on a mutex owned by this thread, priority-sorted.
    private LinkedList<KThread> _mutexWaiters;
    private LinkedListNode<KThread> _mutexWaiterNode;

    // Threads waiting for this (pinned) thread to leave its running core.
    private LinkedList<KThread> _pinnedWaiters;

    public KThread MutexOwner { get; private set; }

    public int ThreadHandleForUserMutex { get; set; }

    // Force-pause bits requested vs. bits we are currently allowed to honor.
    private ThreadSchedState _forcePauseFlags;
    private ThreadSchedState _forcePausePermissionFlags;

    public KernelResult ObjSyncResult { get; set; }

    public int BasePriority { get; set; }
    public int PreferredCore { get; set; }
    public int CurrentCore { get; set; }
    public int ActiveCore { get; set; }

    public bool IsPinned { get; private set; }

    // Saved values while the thread is pinned / core migration is disabled.
    private ulong _originalAffinityMask;
    private int _originalPreferredCore;
    private int _originalBasePriority;
    private int _coreMigrationDisableCount;

    public ThreadSchedState SchedFlags { get; private set; }

    // int-backed so it can be used with Interlocked (see PrepareForTermination).
    private int _shallBeTerminated;
    public bool ShallBeTerminated
    {
        get => _shallBeTerminated != 0;
        set => _shallBeTerminated = value ? 1 : 0;
    }

    public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;

    public bool SyncCancelled { get; set; }
    public bool WaitingSync { get; set; }

    private int _hasExited;
    private bool _hasBeenInitialized;
    private bool _hasBeenReleased;

    public bool WaitingInArbitration { get; set; }

    public long LastPc { get; set; }

    // Serializes SetActivity / SetCoreAndAffinityMask / GetThreadContext3.
    private object ActivityOperationLock = new object();
  78. public KThread(KernelContext context) : base(context)
  79. {
  80. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  81. WaitSyncHandles = new int[MaxWaitSyncObjects];
  82. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  83. _mutexWaiters = new LinkedList<KThread>();
  84. _pinnedWaiters = new LinkedList<KThread>();
  85. }
/// <summary>
/// Initializes the thread: scheduler state, CPU context, TLS (for user threads)
/// and owner-process registration.
/// </summary>
/// <param name="entrypoint">Guest entrypoint address.</param>
/// <param name="argsPtr">Value placed in X0 for the entrypoint.</param>
/// <param name="stackTop">Initial stack pointer (SP for 64-bit, R13 for 32-bit).</param>
/// <param name="priority">Initial base and dynamic priority.</param>
/// <param name="cpuCore">Preferred/active core; also sets the affinity bit.</param>
/// <param name="owner">Owning process, or null for kernel-internal threads.</param>
/// <param name="type">Thread type; must be a valid <see cref="ThreadType"/> value.</param>
/// <param name="customThreadStart">Optional host-side start routine (makes the thread unschedulable by the guest scheduler).</param>
/// <returns><see cref="KernelResult.Success"/> on success, or <see cref="KernelResult.OutOfMemory"/> if TLS allocation fails.</returns>
/// <exception cref="ArgumentException">Thrown if <paramref name="type"/> is out of range.</exception>
public KernelResult Initialize(
    ulong entrypoint,
    ulong argsPtr,
    ulong stackTop,
    int priority,
    int cpuCore,
    KProcess owner,
    ThreadType type,
    ThreadStart customThreadStart = null)
{
    if ((uint)type > 3)
    {
        throw new ArgumentException($"Invalid thread type \"{type}\".");
    }
    ThreadContext = new KThreadContext();
    PreferredCore = cpuCore;
    AffinityMask |= 1UL << cpuCore;
    // Dummy threads are considered running from the start; real threads begin
    // unscheduled until Start() is called.
    SchedFlags = type == ThreadType.Dummy
        ? ThreadSchedState.Running
        : ThreadSchedState.None;
    ActiveCore = cpuCore;
    ObjSyncResult = KernelResult.ThreadNotStarted;
    DynamicPriority = priority;
    BasePriority = priority;
    CurrentCore = cpuCore;
    IsPinned = false;
    _entrypoint = entrypoint;
    _customThreadStart = customThreadStart;
    if (type == ThreadType.User)
    {
        // User threads need a TLS entry in the owner's address space.
        if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
        {
            return KernelResult.OutOfMemory;
        }
        MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
    }
    bool is64Bits;
    if (owner != null)
    {
        Owner = owner;
        owner.IncrementReferenceCount();
        owner.IncrementThreadCount();
        is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
    }
    else
    {
        // Ownerless (kernel) threads default to 64-bit.
        is64Bits = true;
    }
    HostThread = new Thread(ThreadStart);
    Context = CpuContext.CreateExecutionContext();
    Context.IsAarch32 = !is64Bits;
    Context.SetX(0, argsPtr);
    if (is64Bits)
    {
        // X18 gets a random value with bit 0 set; X31 is SP on AArch64.
        Context.SetX(18, KSystemControl.GenerateRandom() | 1);
        Context.SetX(31, stackTop);
    }
    else
    {
        // R13 is SP on AArch32.
        Context.SetX(13, (uint)stackTop);
    }
    // 19.2 MHz counter frequency, matching the hardware's CNTFRQ_EL0.
    Context.CntfrqEl0 = 19200000;
    Context.Tpidr = (long)_tlsAddress;
    ThreadUid = KernelContext.NewThreadUid();
    HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";
    _hasBeenInitialized = true;
    _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask;
    if (owner != null)
    {
        owner.SubscribeThreadEventHandlers(Context);
        owner.AddThread(this);
        if (owner.IsPaused)
        {
            // Owner is paused: inherit the pause, unless we are already being
            // torn down (in which case there is nothing to pause).
            KernelContext.CriticalSection.Enter();
            if (TerminationRequested)
            {
                KernelContext.CriticalSection.Leave();
                return KernelResult.Success;
            }
            _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;
            CombineForcePauseFlags();
            KernelContext.CriticalSection.Leave();
        }
    }
    return KernelResult.Success;
}
/// <summary>
/// Transitions the thread to the Running state and starts its host thread.
/// May spin (releasing and re-taking the critical section) while the calling
/// thread has pending force-pause flags.
/// </summary>
/// <returns>
/// <see cref="KernelResult.Success"/> if started,
/// <see cref="KernelResult.InvalidState"/> if already started,
/// <see cref="KernelResult.ThreadTerminating"/> if termination was requested first.
/// </returns>
public KernelResult Start()
{
    if (!KernelContext.KernelInitialized)
    {
        // Threads created before kernel init finishes stay paused until it does.
        KernelContext.CriticalSection.Enter();
        if (!TerminationRequested)
        {
            _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;
            CombineForcePauseFlags();
        }
        KernelContext.CriticalSection.Leave();
    }
    KernelResult result = KernelResult.ThreadTerminating;
    KernelContext.CriticalSection.Enter();
    if (!ShallBeTerminated)
    {
        KThread currentThread = KernelStatic.GetCurrentThread();
        while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
        {
            // A non-None low nibble means the thread was already started.
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
            {
                result = KernelResult.InvalidState;
                break;
            }
            if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
            {
                if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                {
                    CombineForcePauseFlags();
                }
                SetNewSchedFlags(ThreadSchedState.Running);
                StartHostThread();
                result = KernelResult.Success;
                break;
            }
            else
            {
                // Caller has pending pause flags: apply them, yield the critical
                // section so the pause can take effect, then re-check.
                currentThread.CombineForcePauseFlags();
                KernelContext.CriticalSection.Leave();
                KernelContext.CriticalSection.Enter();
                if (currentThread.ShallBeTerminated)
                {
                    break;
                }
            }
        }
    }
    KernelContext.CriticalSection.Leave();
    return result;
}
/// <summary>
/// Marks the thread for termination, unpinning it if needed, and wakes it up
/// so it can observe the request.
/// </summary>
/// <returns>The thread's low scheduling state after the request was made.</returns>
public ThreadSchedState PrepareForTermination()
{
    KernelContext.CriticalSection.Enter();
    if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this)
    {
        Owner.UnpinThread(this);
    }
    ThreadSchedState result;
    // Only the first terminator performs the state transition (CAS on the flag).
    if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
    {
        if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
        {
            // Never started: go straight to TerminationPending.
            SchedFlags = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                // Drop any thread-pause so the thread can run and terminate.
                _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;
                ThreadSchedState oldSchedFlags = SchedFlags;
                SchedFlags &= ThreadSchedState.LowMask;
                AdjustScheduling(oldSchedFlags);
            }
            // Boost priority so termination happens promptly.
            if (BasePriority >= 0x10)
            {
                SetPriority(0xF);
            }
            if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
            {
                // TODO: GIC distributor stuffs (sgir changes ect)
                Context.RequestInterrupt();
            }
            // Abort any in-progress synchronization wait.
            SignaledObj = null;
            ObjSyncResult = KernelResult.ThreadTerminating;
            ReleaseAndResume();
        }
    }
    result = SchedFlags;
    KernelContext.CriticalSection.Leave();
    return result & ThreadSchedState.LowMask;
}
  263. public void Terminate()
  264. {
  265. ThreadSchedState state = PrepareForTermination();
  266. if (state != ThreadSchedState.TerminationPending)
  267. {
  268. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  269. }
  270. }
/// <summary>
/// Runs after every syscall: exits the thread if termination was requested,
/// otherwise re-applies any pending force-pause flags. Loops while the state
/// keeps resolving to termination-pending.
/// </summary>
public void HandlePostSyscall()
{
    ThreadSchedState state;
    do
    {
        if (TerminationRequested)
        {
            Exit();
            // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
            break;
        }
        KernelContext.CriticalSection.Enter();
        if (TerminationRequested)
        {
            state = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                CombineForcePauseFlags();
            }
            state = ThreadSchedState.Running;
        }
        KernelContext.CriticalSection.Leave();
    } while (state == ThreadSchedState.TerminationPending);
}
/// <summary>
/// Performs thread exit: releases the owner's thread resource, clears pause
/// state, signals waiters and stops the CPU context.
/// </summary>
public void Exit()
{
    // TODO: Debug event.
    if (Owner != null)
    {
        Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);
        _hasBeenReleased = true;
    }
    KernelContext.CriticalSection.Enter();
    // Pauses no longer apply to a dying thread.
    _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
    _forcePausePermissionFlags = 0;
    bool decRef = ExitImpl();
    Context.StopRunning();
    KernelContext.CriticalSection.Leave();
    // Drop the reference only on the first exit (see ExitImpl).
    if (decRef)
    {
        DecrementReferenceCount();
    }
}
/// <summary>
/// Moves the thread to TerminationPending and signals anyone waiting on it.
/// </summary>
/// <returns>True only for the first caller (the exchange observes 0), so the
/// reference count is decremented exactly once.</returns>
private bool ExitImpl()
{
    KernelContext.CriticalSection.Enter();
    SetNewSchedFlags(ThreadSchedState.TerminationPending);
    bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;
    Signal();
    KernelContext.CriticalSection.Leave();
    return decRef;
}
  326. private int GetEffectiveRunningCore()
  327. {
  328. for (int coreNumber = 0; coreNumber < KScheduler.CpuCoresCount; coreNumber++)
  329. {
  330. if (KernelContext.Schedulers[coreNumber].CurrentThread == this)
  331. {
  332. return coreNumber;
  333. }
  334. }
  335. return -1;
  336. }
  337. public KernelResult Sleep(long timeout)
  338. {
  339. KernelContext.CriticalSection.Enter();
  340. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  341. {
  342. KernelContext.CriticalSection.Leave();
  343. return KernelResult.ThreadTerminating;
  344. }
  345. SetNewSchedFlags(ThreadSchedState.Paused);
  346. if (timeout > 0)
  347. {
  348. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  349. }
  350. KernelContext.CriticalSection.Leave();
  351. if (timeout > 0)
  352. {
  353. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  354. }
  355. return 0;
  356. }
/// <summary>
/// Sets the base priority and re-evaluates priority inheritance. While pinned,
/// the new value is stashed and only the original is updated.
/// </summary>
/// <param name="priority">New base priority (lower value = higher priority).</param>
public void SetPriority(int priority)
{
    KernelContext.CriticalSection.Enter();
    if (IsPinned)
    {
        // Applied when the thread is unpinned (handled elsewhere — not visible here).
        _originalBasePriority = priority;
    }
    else
    {
        BasePriority = priority;
    }
    UpdatePriorityInheritance();
    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Adds a force-pause flag and applies it to the scheduling state.
/// Caller is expected to hold the kernel critical section.
/// </summary>
/// <param name="type">Pause flag(s) to set.</param>
public void Suspend(ThreadSchedState type)
{
    _forcePauseFlags |= type;
    CombineForcePauseFlags();
}
/// <summary>
/// Clears a force-pause flag; if no other pause flags remain, restores the
/// thread's low scheduling state.
/// Caller is expected to hold the kernel critical section.
/// </summary>
/// <param name="type">Pause flag(s) to clear.</param>
public void Resume(ThreadSchedState type)
{
    ThreadSchedState oldForcePauseFlags = _forcePauseFlags;
    _forcePauseFlags &= ~type;
    // Only unpause if the cleared flag was the last one set.
    if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
    {
        ThreadSchedState oldSchedFlags = SchedFlags;
        SchedFlags &= ThreadSchedState.LowMask;
        AdjustScheduling(oldSchedFlags);
    }
}
/// <summary>
/// Pauses or unpauses the thread via the thread-pause force flag. When pausing,
/// blocks until the target thread is actually off-core (or, if the target is
/// pinned, parks the caller on the pinned-waiters list).
/// </summary>
/// <param name="pause">True to pause, false to unpause.</param>
/// <returns>
/// <see cref="KernelResult.Success"/>, <see cref="KernelResult.InvalidState"/>
/// if the request does not match the current pause state, or
/// <see cref="KernelResult.ThreadTerminating"/> if the caller is terminating.
/// </returns>
public KernelResult SetActivity(bool pause)
{
    lock (ActivityOperationLock)
    {
        KernelResult result = KernelResult.Success;
        KernelContext.CriticalSection.Enter();
        ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
        // Only paused or running threads can change activity.
        if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
        {
            KernelContext.CriticalSection.Leave();
            return KernelResult.InvalidState;
        }
        if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
        {
            if (pause)
            {
                // Pause, the force pause flag should be clear (thread is NOT paused).
                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                {
                    Suspend(ThreadSchedState.ThreadPauseFlag);
                }
                else
                {
                    result = KernelResult.InvalidState;
                }
            }
            else
            {
                // Unpause, the force pause flag should be set (thread is paused).
                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                {
                    Resume(ThreadSchedState.ThreadPauseFlag);
                }
                else
                {
                    result = KernelResult.InvalidState;
                }
            }
        }
        KernelContext.CriticalSection.Leave();
        if (result == KernelResult.Success && pause)
        {
            // Wait for the pause to take effect: loop until the thread is no
            // longer running on any core.
            bool isThreadRunning = true;
            while (isThreadRunning)
            {
                KernelContext.CriticalSection.Enter();
                if (TerminationRequested)
                {
                    KernelContext.CriticalSection.Leave();
                    break;
                }
                isThreadRunning = false;
                if (IsPinned)
                {
                    // Pinned target: park the caller until the target yields.
                    KThread currentThread = KernelStatic.GetCurrentThread();
                    if (currentThread.TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();
                        result = KernelResult.ThreadTerminating;
                        break;
                    }
                    _pinnedWaiters.AddLast(currentThread);
                    currentThread.Reschedule(ThreadSchedState.Paused);
                }
                else
                {
                    isThreadRunning = GetEffectiveRunningCore() >= 0;
                }
                KernelContext.CriticalSection.Leave();
            }
        }
        return result;
    }
}
/// <summary>
/// Captures the thread's CPU context. The thread must already be paused via
/// the thread-pause flag (see <see cref="SetActivity"/>).
/// </summary>
/// <param name="context">Receives the captured context; default if unavailable.</param>
/// <returns>
/// <see cref="KernelResult.InvalidState"/> if the thread is not paused,
/// otherwise <see cref="KernelResult.Success"/> (also when the thread is
/// terminating, in which case <paramref name="context"/> stays default).
/// </returns>
public KernelResult GetThreadContext3(out ThreadContext context)
{
    context = default;
    lock (ActivityOperationLock)
    {
        KernelContext.CriticalSection.Enter();
        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
        {
            KernelContext.CriticalSection.Leave();
            return KernelResult.InvalidState;
        }
        if (!TerminationRequested)
        {
            context = GetCurrentContext();
        }
        KernelContext.CriticalSection.Leave();
    }
    return KernelResult.Success;
}
  480. private static uint GetPsr(ARMeilleure.State.ExecutionContext context)
  481. {
  482. return (context.GetPstateFlag(ARMeilleure.State.PState.NFlag) ? (1U << (int)ARMeilleure.State.PState.NFlag) : 0U) |
  483. (context.GetPstateFlag(ARMeilleure.State.PState.ZFlag) ? (1U << (int)ARMeilleure.State.PState.ZFlag) : 0U) |
  484. (context.GetPstateFlag(ARMeilleure.State.PState.CFlag) ? (1U << (int)ARMeilleure.State.PState.CFlag) : 0U) |
  485. (context.GetPstateFlag(ARMeilleure.State.PState.VFlag) ? (1U << (int)ARMeilleure.State.PState.VFlag) : 0U);
  486. }
/// <summary>
/// Snapshots the guest CPU state into a <see cref="ThreadContext"/>, using the
/// 64-bit or 32-bit register layout depending on the owner process flags.
/// </summary>
/// <returns>The captured context.</returns>
private ThreadContext GetCurrentContext()
{
    // AArch32 exposes R0-R14 (15 GPRs) and 16 FPU registers here.
    const int MaxRegistersAArch32 = 15;
    const int MaxFpuRegistersAArch32 = 16;
    ThreadContext context = new ThreadContext();
    if (Owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit))
    {
        for (int i = 0; i < context.Registers.Length; i++)
        {
            context.Registers[i] = Context.GetX(i);
        }
        for (int i = 0; i < context.FpuRegisters.Length; i++)
        {
            context.FpuRegisters[i] = Context.GetV(i);
        }
        // AArch64 ABI: X29 = FP, X30 = LR, X31 = SP here.
        context.Fp = Context.GetX(29);
        context.Lr = Context.GetX(30);
        context.Sp = Context.GetX(31);
        context.Pc = (ulong)LastPc;
        context.Pstate = GetPsr(Context);
        context.Tpidr = (ulong)Context.Tpidr;
    }
    else
    {
        for (int i = 0; i < MaxRegistersAArch32; i++)
        {
            context.Registers[i] = (uint)Context.GetX(i);
        }
        for (int i = 0; i < MaxFpuRegistersAArch32; i++)
        {
            context.FpuRegisters[i] = Context.GetV(i);
        }
        context.Pc = (uint)LastPc;
        context.Pstate = GetPsr(Context);
        context.Tpidr = (uint)Context.Tpidr;
    }
    context.Fpcr = (uint)Context.Fpcr;
    context.Fpsr = (uint)Context.Fpsr;
    return context;
}
/// <summary>
/// Cancels an in-progress synchronization wait. If the thread is not actually
/// blocked in a sync wait, only flags the cancellation for the next wait.
/// </summary>
public void CancelSynchronization()
{
    KernelContext.CriticalSection.Enter();
    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
    {
        // Not blocked in a sync wait: remember the cancel for later.
        SyncCancelled = true;
    }
    else if (Withholder != null)
    {
        // Blocked and held by a withholder list: remove and resume.
        Withholder.Remove(WithholderNode);
        SetNewSchedFlags(ThreadSchedState.Running);
        Withholder = null;
        SyncCancelled = true;
    }
    else
    {
        // Blocked directly: fail the wait with Cancelled and resume.
        SignaledObj = null;
        ObjSyncResult = KernelResult.Cancelled;
        SetNewSchedFlags(ThreadSchedState.Running);
        SyncCancelled = false;
    }
    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Updates the preferred core and affinity mask. If core migration is disabled,
/// only the saved "original" values change; otherwise the active core may be
/// migrated and the method waits until the thread stops running on a core that
/// is no longer in the mask.
/// </summary>
/// <param name="newCore">New preferred core, or -3 to keep the current one.</param>
/// <param name="newAffinityMask">New affinity bit mask; must include the preferred core.</param>
/// <returns>
/// <see cref="KernelResult.Success"/>, <see cref="KernelResult.InvalidCombination"/>
/// if the mask excludes the kept core, or <see cref="KernelResult.ThreadTerminating"/>
/// if the caller is terminating while waiting.
/// </returns>
public KernelResult SetCoreAndAffinityMask(int newCore, ulong newAffinityMask)
{
    lock (ActivityOperationLock)
    {
        KernelContext.CriticalSection.Enter();
        bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0;
        // The value -3 is "do not change the preferred core".
        if (newCore == -3)
        {
            newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore;
            if ((newAffinityMask & (1UL << newCore)) == 0)
            {
                KernelContext.CriticalSection.Leave();
                return KernelResult.InvalidCombination;
            }
        }
        if (isCoreMigrationDisabled)
        {
            // Defer: only update the saved values restored when migration re-enables.
            _originalPreferredCore = newCore;
            _originalAffinityMask = newAffinityMask;
        }
        else
        {
            ulong oldAffinityMask = AffinityMask;
            PreferredCore = newCore;
            AffinityMask = newAffinityMask;
            if (oldAffinityMask != newAffinityMask)
            {
                int oldCore = ActiveCore;
                // If the active core is no longer allowed, pick a new one.
                if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
                {
                    if (PreferredCore < 0)
                    {
                        // No preferred core: use the highest set bit of the mask.
                        ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount(AffinityMask);
                    }
                    else
                    {
                        ActiveCore = PreferredCore;
                    }
                }
                AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
            }
        }
        KernelContext.CriticalSection.Leave();
        // Wait until the thread is no longer running on a disallowed core.
        bool targetThreadPinned = true;
        while (targetThreadPinned)
        {
            KernelContext.CriticalSection.Enter();
            if (TerminationRequested)
            {
                KernelContext.CriticalSection.Leave();
                break;
            }
            targetThreadPinned = false;
            int coreNumber = GetEffectiveRunningCore();
            bool isPinnedThreadCurrentlyRunning = coreNumber >= 0;
            if (isPinnedThreadCurrentlyRunning && ((1UL << coreNumber) & AffinityMask) == 0)
            {
                if (IsPinned)
                {
                    // Pinned target: park the caller until the target yields.
                    KThread currentThread = KernelStatic.GetCurrentThread();
                    if (currentThread.TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();
                        return KernelResult.ThreadTerminating;
                    }
                    _pinnedWaiters.AddLast(currentThread);
                    currentThread.Reschedule(ThreadSchedState.Paused);
                }
                else
                {
                    targetThreadPinned = true;
                }
            }
            KernelContext.CriticalSection.Leave();
        }
        return KernelResult.Success;
    }
}
/// <summary>
/// Merges the permitted force-pause flags into the high bits of SchedFlags,
/// keeping the low scheduling nibble, and updates the scheduler.
/// Caller is expected to hold the kernel critical section.
/// </summary>
private void CombineForcePauseFlags()
{
    ThreadSchedState oldFlags = SchedFlags;
    ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
    // Only pause flags currently permitted take effect.
    SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags);
    AdjustScheduling(oldFlags);
}
/// <summary>
/// Replaces the low scheduling nibble with <paramref name="newFlags"/> while
/// preserving the high (pause) bits, adjusting the scheduler if it changed.
/// </summary>
/// <param name="newFlags">New low scheduling state.</param>
private void SetNewSchedFlags(ThreadSchedState newFlags)
{
    KernelContext.CriticalSection.Enter();
    ThreadSchedState oldFlags = SchedFlags;
    SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;
    // Skip the scheduler update when the low state did not actually change.
    if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
    {
        AdjustScheduling(oldFlags);
    }
    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Resumes the thread if it is currently paused, detaching it from any
/// withholder list first.
/// </summary>
public void ReleaseAndResume()
{
    KernelContext.CriticalSection.Enter();
    if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
    {
        if (Withholder != null)
        {
            Withholder.Remove(WithholderNode);
            SetNewSchedFlags(ThreadSchedState.Running);
            Withholder = null;
        }
        else
        {
            SetNewSchedFlags(ThreadSchedState.Running);
        }
    }
    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Unconditionally sets the low scheduling nibble (keeping the high pause bits)
/// and updates the scheduler.
/// </summary>
/// <param name="newFlags">State whose low nibble becomes the new low state.</param>
public void Reschedule(ThreadSchedState newFlags)
{
    KernelContext.CriticalSection.Enter();
    ThreadSchedState oldFlags = SchedFlags;
    SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
        (newFlags & ThreadSchedState.LowMask);
    AdjustScheduling(oldFlags);
    KernelContext.CriticalSection.Leave();
}
/// <summary>
/// Registers <paramref name="requester"/> as waiting on a mutex owned by this
/// thread, then re-evaluates priority inheritance.
/// </summary>
/// <param name="requester">Thread blocked on our mutex.</param>
public void AddMutexWaiter(KThread requester)
{
    AddToMutexWaitersList(requester);
    requester.MutexOwner = this;
    UpdatePriorityInheritance();
}
/// <summary>
/// Removes <paramref name="thread"/> from this thread's mutex waiter list
/// (if it is on one) and re-evaluates priority inheritance.
/// </summary>
/// <param name="thread">Thread to detach from the waiter list.</param>
public void RemoveMutexWaiter(KThread thread)
{
    // Node may already be detached; only remove if it still belongs to a list.
    if (thread._mutexWaiterNode?.List != null)
    {
        _mutexWaiters.Remove(thread._mutexWaiterNode);
    }
    thread.MutexOwner = null;
    UpdatePriorityInheritance();
}
/// <summary>
/// Hands the mutex at <paramref name="mutexAddress"/> over to the
/// highest-priority waiter, moving the remaining waiters for that mutex onto
/// the new owner's waiter list.
/// </summary>
/// <param name="mutexAddress">Guest address of the mutex being released.</param>
/// <param name="count">Receives the number of threads that were waiting on this mutex.</param>
/// <returns>The new owner thread, or null if nobody was waiting.</returns>
public KThread RelinquishMutex(ulong mutexAddress, out int count)
{
    count = 0;
    if (_mutexWaiters.First == null)
    {
        return null;
    }
    KThread newMutexOwner = null;
    LinkedListNode<KThread> currentNode = _mutexWaiters.First;
    do
    {
        // Skip all threads that are not waiting for this mutex.
        while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
        {
            currentNode = currentNode.Next;
        }
        if (currentNode == null)
        {
            break;
        }
        // Capture the successor before detaching the node from our list.
        LinkedListNode<KThread> nextNode = currentNode.Next;
        _mutexWaiters.Remove(currentNode);
        currentNode.Value.MutexOwner = newMutexOwner;
        if (newMutexOwner != null)
        {
            // New owner was already selected, re-insert on new owner list.
            newMutexOwner.AddToMutexWaitersList(currentNode.Value);
        }
        else
        {
            // New owner not selected yet, use current thread.
            newMutexOwner = currentNode.Value;
        }
        count++;
        currentNode = nextNode;
    }
    while (currentNode != null);
    if (newMutexOwner != null)
    {
        // Both the old and new owner may need their inherited priority updated.
        UpdatePriorityInheritance();
        newMutexOwner.UpdatePriorityInheritance();
    }
    return newMutexOwner;
}
/// <summary>
/// Recomputes this thread's dynamic priority from its base priority and its
/// highest-priority mutex waiter, propagating changes up the ownership chain.
/// </summary>
private void UpdatePriorityInheritance()
{
    // If any of the threads waiting for the mutex has
    // higher priority than the current thread, then
    // the current thread inherits that priority.
    int highestPriority = BasePriority;
    if (_mutexWaiters.First != null)
    {
        // List is priority-sorted, so the head is the best candidate.
        int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;
        if (waitingDynamicPriority < highestPriority)
        {
            highestPriority = waitingDynamicPriority;
        }
    }
    if (highestPriority != DynamicPriority)
    {
        int oldPriority = DynamicPriority;
        DynamicPriority = highestPriority;
        AdjustSchedulingForNewPriority(oldPriority);
        if (MutexOwner != null)
        {
            // Remove and re-insert to ensure proper sorting based on new priority.
            MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);
            MutexOwner.AddToMutexWaitersList(this);
            // Recurse so the whole ownership chain sees the new priority.
            MutexOwner.UpdatePriorityInheritance();
        }
    }
}
  761. private void AddToMutexWaitersList(KThread thread)
  762. {
  763. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  764. int currentPriority = thread.DynamicPriority;
  765. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  766. {
  767. nextPrio = nextPrio.Next;
  768. }
  769. if (nextPrio != null)
  770. {
  771. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  772. }
  773. else
  774. {
  775. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  776. }
  777. }
/// <summary>
/// Synchronizes the priority queue (or, for unschedulable threads, the host
/// wait event) with a SchedFlags transition from <paramref name="oldFlags"/>.
/// </summary>
/// <param name="oldFlags">Scheduling flags before the change.</param>
private void AdjustScheduling(ThreadSchedState oldFlags)
{
    if (oldFlags == SchedFlags)
    {
        return;
    }
    if (!IsSchedulable)
    {
        if (!_forcedUnschedulable)
        {
            // Ensure our thread is running and we have an event.
            StartHostThread();
            // If the thread is not schedulable, we want to just run or pause
            // it directly as we don't care about priority or the core it is
            // running on in this case.
            if (SchedFlags == ThreadSchedState.Running)
            {
                _schedulerWaitEvent.Set();
            }
            else
            {
                _schedulerWaitEvent.Reset();
            }
        }
        return;
    }
    if (oldFlags == ThreadSchedState.Running)
    {
        // Was running, now it's stopped.
        if (ActiveCore >= 0)
        {
            KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
        }
        // Also remove from the "suggested" lists of the other affine cores.
        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
            {
                KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
            }
        }
    }
    else if (SchedFlags == ThreadSchedState.Running)
    {
        // Was stopped, now it's running.
        if (ActiveCore >= 0)
        {
            KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
        }
        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
            {
                KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
            }
        }
    }
    KernelContext.ThreadReselectionRequested = true;
}
        /// <summary>
        /// Moves this thread between priority queues after its dynamic priority
        /// changed. Only applies to running, schedulable threads.
        /// </summary>
        /// <param name="oldPriority">Dynamic priority the thread had before the change.</param>
        private void AdjustSchedulingForNewPriority(int oldPriority)
        {
            if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            if (ActiveCore >= 0)
            {
                KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
                }
            }

            // Add thread to the new priority queues.
            KThread currentThread = KernelStatic.GetCurrentThread();

            if (ActiveCore >= 0)
            {
                // The currently running thread is placed at the front of its new
                // queue; any other thread goes to the back.
                if (currentThread == this)
                {
                    KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
                }
                else
                {
                    KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
                }
            }

            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
                {
                    KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                }
            }

            // Ask the scheduler to re-run thread selection.
            KernelContext.ThreadReselectionRequested = true;
        }
        /// <summary>
        /// Moves this thread between per-core priority queues after its affinity
        /// mask and/or active core changed. Only applies to running, schedulable
        /// threads with a priority inside the scheduler's priority range.
        /// </summary>
        /// <param name="oldAffinityMask">Affinity mask before the change.</param>
        /// <param name="oldCore">Core the thread was scheduled on before the change.</param>
        private void AdjustSchedulingForNewAffinity(ulong oldAffinityMask, int oldCore)
        {
            if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((oldAffinityMask >> core) & 1) != 0)
                {
                    // Scheduled on the old core, suggested everywhere else.
                    if (core == oldCore)
                    {
                        KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }

            // Add thread to the new priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((AffinityMask >> core) & 1) != 0)
                {
                    // Scheduled on the new active core, suggested everywhere else.
                    if (core == ActiveCore)
                    {
                        KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            // Ask the scheduler to re-run thread selection.
            KernelContext.ThreadReselectionRequested = true;
        }
  914. public void SetEntryArguments(long argsPtr, int threadHandle)
  915. {
  916. Context.SetX(0, (ulong)argsPtr);
  917. Context.SetX(1, (ulong)threadHandle);
  918. }
  919. public void TimeUp()
  920. {
  921. ReleaseAndResume();
  922. }
  923. public string GetGuestStackTrace()
  924. {
  925. return Owner.Debugger.GetGuestStackTrace(this);
  926. }
  927. public string GetGuestRegisterPrintout()
  928. {
  929. return Owner.Debugger.GetCpuRegisterPrintout(this);
  930. }
  931. public void PrintGuestStackTrace()
  932. {
  933. Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
  934. }
  935. public void PrintGuestRegisterPrintout()
  936. {
  937. Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n");
  938. }
  939. public void AddCpuTime(long ticks)
  940. {
  941. Interlocked.Add(ref _totalTimeRunning, ticks);
  942. }
  943. public void StartHostThread()
  944. {
  945. if (_schedulerWaitEvent == null)
  946. {
  947. var schedulerWaitEvent = new ManualResetEvent(false);
  948. if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
  949. {
  950. HostThread.Start();
  951. }
  952. else
  953. {
  954. schedulerWaitEvent.Dispose();
  955. }
  956. }
  957. }
  958. private void ThreadStart()
  959. {
  960. _schedulerWaitEvent.WaitOne();
  961. KernelStatic.SetKernelContext(KernelContext, this);
  962. if (_customThreadStart != null)
  963. {
  964. _customThreadStart();
  965. }
  966. else
  967. {
  968. Owner.Context.Execute(Context, _entrypoint);
  969. }
  970. Context.Dispose();
  971. _schedulerWaitEvent.Dispose();
  972. }
  973. public void MakeUnschedulable()
  974. {
  975. _forcedUnschedulable = true;
  976. }
  977. public override bool IsSignaled()
  978. {
  979. return _hasExited != 0;
  980. }
        /// <summary>
        /// Releases the kernel resources held by this thread when it is destroyed,
        /// returning the thread slot to the owner process resource limit (or the
        /// global one for kernel-owned threads).
        /// </summary>
        protected override void Destroy()
        {
            if (_hasBeenInitialized)
            {
                FreeResources();

                // NOTE(review): for any thread with a non-null Owner this is always
                // true, so the second Release argument below is 0 for process-owned
                // threads — confirm this matches the intended resource accounting.
                bool released = Owner != null || _hasBeenReleased;

                if (Owner != null)
                {
                    Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                    Owner.DecrementReferenceCount();
                }
                else
                {
                    // Kernel-owned thread: release against the global resource limit.
                    KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                }
            }
        }
        /// <summary>
        /// Frees this thread's resources: detaches it from the owner process, frees
        /// its thread local storage block, and wakes every thread still waiting on
        /// a mutex held by this thread.
        /// </summary>
        private void FreeResources()
        {
            Owner?.RemoveThread(this);

            // NOTE(review): Owner is dereferenced without a null check here; this
            // presumably relies on _tlsAddress being 0 for ownerless threads —
            // confirm.
            if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
            {
                throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
            }

            KernelContext.CriticalSection.Enter();

            // Wake up all threads that may be waiting for a mutex being held by this thread.
            foreach (KThread thread in _mutexWaiters)
            {
                thread.MutexOwner = null;
                thread._originalPreferredCore = 0;
                // Waiters observe an error result since the mutex owner is gone.
                thread.ObjSyncResult = KernelResult.InvalidState;
                thread.ReleaseAndResume();
            }

            KernelContext.CriticalSection.Leave();

            Owner?.DecrementThreadCountAndTerminateIfZero();
        }
        /// <summary>
        /// Pins this thread to the core it is currently running on, saving the
        /// previous core, affinity mask and base priority so <see cref="Unpin"/>
        /// can restore them. While pinned, core migration and thread pausing are
        /// disallowed.
        /// </summary>
        public void Pin()
        {
            IsPinned = true;
            _coreMigrationDisableCount++;

            int activeCore = ActiveCore;

            // Save the pre-pin core configuration, then restrict the thread to
            // the current core only.
            _originalPreferredCore = PreferredCore;
            _originalAffinityMask = AffinityMask;

            ActiveCore = CurrentCore;
            PreferredCore = CurrentCore;
            AffinityMask = 1UL << CurrentCore;

            if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask)
            {
                AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore);
            }

            _originalBasePriority = BasePriority;

            // Cap the base priority using a limit derived from the owner's allowed
            // thread priority mask (lowest set bit index - 1).
            BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1);
            UpdatePriorityInheritance();

            // Disallows thread pausing
            _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag;
            CombineForcePauseFlags();

            // TODO: Assign reduced SVC permissions
        }
        /// <summary>
        /// Undoes <see cref="Pin"/>: restores the saved preferred core, affinity
        /// mask and base priority, re-enables pausing (unless termination was
        /// requested), and wakes any threads waiting for the unpin.
        /// </summary>
        public void Unpin()
        {
            IsPinned = false;
            _coreMigrationDisableCount--;

            ulong affinityMask = AffinityMask;
            int activeCore = ActiveCore;

            // Restore the pre-pin core configuration.
            PreferredCore = _originalPreferredCore;
            AffinityMask = _originalAffinityMask;

            if (AffinityMask != affinityMask)
            {
                // NOTE(review): migration only happens when the restored mask still
                // contains the current active core — confirm this condition is not
                // meant to be inverted (i.e. migrate away when the current core is
                // no longer allowed).
                if ((AffinityMask & 1UL << ActiveCore) != 0)
                {
                    if (PreferredCore >= 0)
                    {
                        ActiveCore = PreferredCore;
                    }
                    else
                    {
                        // No preferred core: pick the highest-numbered core set in
                        // the restored affinity mask.
                        ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
                    }

                    AdjustSchedulingForNewAffinity(affinityMask, activeCore);
                }
            }

            BasePriority = _originalBasePriority;
            UpdatePriorityInheritance();

            if (!TerminationRequested)
            {
                // Allows thread pausing
                _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag;
                CombineForcePauseFlags();

                // TODO: Restore SVC permissions
            }

            // Wake up waiters
            foreach (KThread waiter in _pinnedWaiters)
            {
                waiter.ReleaseAndResume();
            }

            _pinnedWaiters.Clear();
        }
  1078. public void SynchronizePreemptionState()
  1079. {
  1080. KernelContext.CriticalSection.Enter();
  1081. if (Owner != null && Owner.PinnedThreads[CurrentCore] == this)
  1082. {
  1083. ClearUserInterruptFlag();
  1084. Owner.UnpinThread(this);
  1085. }
  1086. KernelContext.CriticalSection.Leave();
  1087. }
  1088. public ushort GetUserDisableCount()
  1089. {
  1090. return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset);
  1091. }
  1092. public void SetUserInterruptFlag()
  1093. {
  1094. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1);
  1095. }
  1096. public void ClearUserInterruptFlag()
  1097. {
  1098. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0);
  1099. }
  1100. }
  1101. }