KThread.cs 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437
  1. using Ryujinx.Common.Logging;
  2. using Ryujinx.Cpu;
  3. using Ryujinx.HLE.HOS.Kernel.Common;
  4. using Ryujinx.HLE.HOS.Kernel.Process;
  5. using Ryujinx.HLE.HOS.Kernel.SupervisorCall;
  6. using System;
  7. using System.Collections.Generic;
  8. using System.Numerics;
  9. using System.Threading;
  10. namespace Ryujinx.HLE.HOS.Kernel.Threading
  11. {
  12. class KThread : KSynchronizationObject, IKFutureSchedulerObject
  13. {
// TLS entry offsets; not referenced in this chunk — names suggest the
// user interrupt/disable-count protocol. TODO confirm usage elsewhere.
private const int TlsUserDisableCountOffset = 0x100;
private const int TlsUserInterruptFlagOffset = 0x102;

// Maximum number of objects a single synchronization wait may target.
public const int MaxWaitSyncObjects = 64;

// Set while the thread should run, reset while it is paused; only used
// for non-schedulable threads (see AdjustScheduling).
private ManualResetEvent _schedulerWaitEvent;

public ManualResetEvent SchedulerWaitEvent => _schedulerWaitEvent;

// Host (managed) thread that executes this guest thread.
public Thread HostThread { get; private set; }

// Emulated CPU state of the thread.
public IExecutionContext Context { get; private set; }

public KThreadContext ThreadContext { get; private set; }

// Effective priority after priority inheritance; BasePriority below is
// the value set explicitly (lower value = higher priority).
public int DynamicPriority { get; set; }

// Bit mask of cores the thread is allowed to run on.
public ulong AffinityMask { get; set; }

public ulong ThreadUid { get; private set; }

private long _totalTimeRunning;

public long TotalTimeRunning => _totalTimeRunning;

// Object that satisfied the last synchronization wait, if any.
public KSynchronizationObject SignaledObj { get; set; }

// Addresses used while blocked on a condition variable / user mutex.
public ulong CondVarAddress { get; set; }

private ulong _entrypoint;
private ThreadStart _customThreadStart;
private bool _forcedUnschedulable;

// Threads with a custom host entry point, or forced unschedulable, are
// run/paused directly instead of going through the priority scheduler.
public bool IsSchedulable => _customThreadStart == null && !_forcedUnschedulable;

public ulong MutexAddress { get; set; }
public int KernelWaitersCount { get; private set; }

// Owning process; may be null (see Initialize's owner == null branch).
public KProcess Owner { get; private set; }

private ulong _tlsAddress;

public ulong TlsAddress => _tlsAddress;

public KSynchronizationObject[] WaitSyncObjects { get; }
public int[] WaitSyncHandles { get; }

public long LastScheduledTime { get; set; }

// Per-core list nodes linking this thread into scheduler structures.
public LinkedListNode<KThread>[] SiblingsPerCore { get; private set; }

// List (and its node) holding this thread while a wait is withheld;
// cleared when the thread is resumed (see ReleaseAndResume).
public LinkedList<KThread> Withholder { get; set; }
public LinkedListNode<KThread> WithholderNode { get; set; }

public LinkedListNode<KThread> ProcessListNode { get; set; }

// Threads waiting on a mutex owned by this thread, kept sorted by
// DynamicPriority (see AddToMutexWaitersList).
private LinkedList<KThread> _mutexWaiters;
private LinkedListNode<KThread> _mutexWaiterNode;

// Threads parked while waiting on a pinned thread (see SetActivity and
// SetCoreAndAffinityMask).
private LinkedList<KThread> _pinnedWaiters;

public KThread MutexOwner { get; private set; }

public int ThreadHandleForUserMutex { get; set; }

// Pending force-pause request bits; masked by the permission bits before
// being merged into SchedFlags (see CombineForcePauseFlags).
private ThreadSchedState _forcePauseFlags;
private ThreadSchedState _forcePausePermissionFlags;

public KernelResult ObjSyncResult { get; set; }

public int BasePriority { get; set; }
public int PreferredCore { get; set; }
public int CurrentCore { get; set; }
public int ActiveCore { get; set; }

public bool IsPinned { get; private set; }

// Values stashed while core migration is disabled or the thread is pinned.
private ulong _originalAffinityMask;
private int _originalPreferredCore;
private int _originalBasePriority;
private int _coreMigrationDisableCount;

public ThreadSchedState SchedFlags { get; private set; }

// Backing field is an int so it can be used with Interlocked operations
// (see PrepareForTermination).
private int _shallBeTerminated;

public bool ShallBeTerminated
{
    get => _shallBeTerminated != 0;
    set => _shallBeTerminated = value ? 1 : 0;
}

public bool TerminationRequested => ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending;

public bool SyncCancelled { get; set; }
public bool WaitingSync { get; set; }

private int _hasExited;
private bool _hasBeenInitialized;
private bool _hasBeenReleased;

public bool WaitingInArbitration { get; set; }

// Serializes activity/affinity operations (SetActivity,
// GetThreadContext3, SetCoreAndAffinityMask).
private object _activityOperationLock;
  77. public KThread(KernelContext context) : base(context)
  78. {
  79. WaitSyncObjects = new KSynchronizationObject[MaxWaitSyncObjects];
  80. WaitSyncHandles = new int[MaxWaitSyncObjects];
  81. SiblingsPerCore = new LinkedListNode<KThread>[KScheduler.CpuCoresCount];
  82. _mutexWaiters = new LinkedList<KThread>();
  83. _pinnedWaiters = new LinkedList<KThread>();
  84. _activityOperationLock = new object();
  85. }
// Initializes the kernel thread state.
// entrypoint: guest entry point address. argsPtr: value placed in X0/R0.
// stackTop: initial stack pointer. priority/cpuCore: initial scheduling
// parameters. owner: owning process (may be null). type: thread type,
// must be in the valid ThreadType range (0-3). customThreadStart:
// optional host entry point; a non-null value makes the thread
// non-schedulable (see IsSchedulable).
// Returns OutOfMemory if TLS allocation fails, otherwise Success.
public KernelResult Initialize(
    ulong entrypoint,
    ulong argsPtr,
    ulong stackTop,
    int priority,
    int cpuCore,
    KProcess owner,
    ThreadType type,
    ThreadStart customThreadStart = null)
{
    if ((uint)type > 3)
    {
        throw new ArgumentException($"Invalid thread type \"{type}\".");
    }

    ThreadContext = new KThreadContext();

    PreferredCore = cpuCore;
    AffinityMask |= 1UL << cpuCore;

    // Dummy threads are treated as always running.
    SchedFlags = type == ThreadType.Dummy
        ? ThreadSchedState.Running
        : ThreadSchedState.None;

    ActiveCore = cpuCore;
    ObjSyncResult = KernelResult.ThreadNotStarted;
    DynamicPriority = priority;
    BasePriority = priority;
    CurrentCore = cpuCore;
    IsPinned = false;

    _entrypoint = entrypoint;
    _customThreadStart = customThreadStart;

    if (type == ThreadType.User)
    {
        // NOTE(review): owner is dereferenced here before the null check
        // below, so a user thread with a null owner would throw — confirm
        // callers always pass an owner for ThreadType.User.
        if (owner.AllocateThreadLocalStorage(out _tlsAddress) != KernelResult.Success)
        {
            return KernelResult.OutOfMemory;
        }

        MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize);
    }

    bool is64Bits;

    if (owner != null)
    {
        Owner = owner;

        owner.IncrementReferenceCount();
        owner.IncrementThreadCount();

        is64Bits = owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit);
    }
    else
    {
        // Ownerless (kernel-internal) threads are assumed to be 64-bit.
        is64Bits = true;
    }

    HostThread = new Thread(ThreadStart);

    Context = owner?.CreateExecutionContext() ?? new ProcessExecutionContext();

    Context.IsAarch32 = !is64Bits;

    Context.SetX(0, argsPtr);

    if (is64Bits)
    {
        // X18 is seeded with a random value with bit 0 forced set;
        // X31 is the stack pointer alias on this context.
        Context.SetX(18, KSystemControl.GenerateRandom() | 1);
        Context.SetX(31, stackTop);
    }
    else
    {
        // AArch32: R13 is the stack pointer.
        Context.SetX(13, (uint)stackTop);
    }

    Context.TpidrroEl0 = (long)_tlsAddress;

    ThreadUid = KernelContext.NewThreadUid();

    HostThread.Name = customThreadStart != null ? $"HLE.OsThread.{ThreadUid}" : $"HLE.GuestThread.{ThreadUid}";

    _hasBeenInitialized = true;

    _forcePausePermissionFlags = ThreadSchedState.ForcePauseMask;

    if (owner != null)
    {
        owner.AddThread(this);

        if (owner.IsPaused)
        {
            KernelContext.CriticalSection.Enter();

            if (TerminationRequested)
            {
                KernelContext.CriticalSection.Leave();

                return KernelResult.Success;
            }

            // Owner process is paused, so this thread starts paused too.
            _forcePauseFlags |= ThreadSchedState.ProcessPauseFlag;

            CombineForcePauseFlags();

            KernelContext.CriticalSection.Leave();
        }
    }

    return KernelResult.Success;
}
// Transitions the thread from its initial state to Running and starts
// the host thread. If the calling thread has pending force-pause flags,
// those are applied first and the critical section is released/re-taken
// so the pause can take effect before retrying.
// Returns InvalidState if the thread was already started,
// ThreadTerminating if a termination request intervened, else Success.
public KernelResult Start()
{
    if (!KernelContext.KernelInitialized)
    {
        KernelContext.CriticalSection.Enter();

        // Threads created before the kernel finished initializing are
        // held paused until initialization completes.
        if (!TerminationRequested)
        {
            _forcePauseFlags |= ThreadSchedState.KernelInitPauseFlag;

            CombineForcePauseFlags();
        }

        KernelContext.CriticalSection.Leave();
    }

    KernelResult result = KernelResult.ThreadTerminating;

    KernelContext.CriticalSection.Enter();

    if (!ShallBeTerminated)
    {
        KThread currentThread = KernelStatic.GetCurrentThread();

        while (SchedFlags != ThreadSchedState.TerminationPending && (currentThread == null || !currentThread.TerminationRequested))
        {
            // Low nibble no longer None means the thread already started.
            if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.None)
            {
                result = KernelResult.InvalidState;
                break;
            }

            if (currentThread == null || currentThread._forcePauseFlags == ThreadSchedState.None)
            {
                if (Owner != null && _forcePauseFlags != ThreadSchedState.None)
                {
                    CombineForcePauseFlags();
                }

                SetNewSchedFlags(ThreadSchedState.Running);

                StartHostThread();

                result = KernelResult.Success;
                break;
            }
            else
            {
                // The caller must pause first: apply its pause flags,
                // bounce the critical section so the scheduler can act on
                // them, then retry unless the caller is terminating.
                currentThread.CombineForcePauseFlags();

                KernelContext.CriticalSection.Leave();
                KernelContext.CriticalSection.Enter();

                if (currentThread.ShallBeTerminated)
                {
                    break;
                }
            }
        }
    }

    KernelContext.CriticalSection.Leave();

    return result;
}
// Requests thread termination. Only the first request (guarded by the
// Interlocked exchange on _shallBeTerminated) performs the work: a thread
// that never started goes straight to TerminationPending; otherwise the
// thread pause flag is dropped so it can run to its death, its priority
// is boosted to at most 0xF, a CPU interrupt is requested if it is
// running, and any pending synchronization is failed with
// ThreadTerminating. Returns the resulting low scheduling state.
public ThreadSchedState PrepareForTermination()
{
    KernelContext.CriticalSection.Enter();

    // Unpin first if this thread is the pinned thread of the caller's core.
    if (Owner != null && Owner.PinnedThreads[KernelStatic.GetCurrentThread().CurrentCore] == this)
    {
        Owner.UnpinThread(this);
    }

    ThreadSchedState result;

    if (Interlocked.CompareExchange(ref _shallBeTerminated, 1, 0) == 0)
    {
        if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.None)
        {
            // Thread was never started; terminate it directly.
            SchedFlags = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                // Clear the pause flag and strip the high bits so the
                // thread can be scheduled again to terminate itself.
                _forcePauseFlags &= ~ThreadSchedState.ThreadPauseFlag;

                ThreadSchedState oldSchedFlags = SchedFlags;

                SchedFlags &= ThreadSchedState.LowMask;

                AdjustScheduling(oldSchedFlags);
            }

            // Boost priority (lower value = higher) for prompt termination.
            if (BasePriority >= 0x10)
            {
                SetPriority(0xF);
            }

            if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Running)
            {
                // TODO: GIC distributor stuffs (sgir changes ect)
                Context.RequestInterrupt();
            }

            // Fail any pending synchronization wait.
            SignaledObj = null;
            ObjSyncResult = KernelResult.ThreadTerminating;

            ReleaseAndResume();
        }
    }

    result = SchedFlags;

    KernelContext.CriticalSection.Leave();

    return result & ThreadSchedState.LowMask;
}
  261. public void Terminate()
  262. {
  263. ThreadSchedState state = PrepareForTermination();
  264. if (state != ThreadSchedState.TerminationPending)
  265. {
  266. KernelContext.Synchronization.WaitFor(new KSynchronizationObject[] { this }, -1, out _);
  267. }
  268. }
// Runs after every syscall. Exits the thread if termination was
// requested; otherwise re-applies any pending force-pause flags. Loops
// until the thread is either running normally or has exited.
public void HandlePostSyscall()
{
    ThreadSchedState state;

    do
    {
        if (TerminationRequested)
        {
            Exit();

            // As the death of the thread is handled by the CPU emulator, we differ from the official kernel and return here.
            break;
        }

        KernelContext.CriticalSection.Enter();

        // Re-check under the critical section: termination may have been
        // requested between the check above and taking the lock.
        if (TerminationRequested)
        {
            state = ThreadSchedState.TerminationPending;
        }
        else
        {
            if (_forcePauseFlags != ThreadSchedState.None)
            {
                CombineForcePauseFlags();
            }

            state = ThreadSchedState.Running;
        }

        KernelContext.CriticalSection.Leave();
    } while (state == ThreadSchedState.TerminationPending);
}
// Called when the thread exits: releases the thread resource back to the
// owner's resource limit, clears all force-pause state, marks the thread
// terminating (signaling waiters) and stops the host CPU context. The
// reference count is dropped only on the first exit (see ExitImpl).
public void Exit()
{
    // TODO: Debug event.

    if (Owner != null)
    {
        Owner.ResourceLimit?.Release(LimitableResource.Thread, 0, 1);

        _hasBeenReleased = true;
    }

    KernelContext.CriticalSection.Enter();

    // An exiting thread can no longer be force paused.
    _forcePauseFlags &= ~ThreadSchedState.ForcePauseMask;
    _forcePausePermissionFlags = 0;

    bool decRef = ExitImpl();

    Context.StopRunning();

    KernelContext.CriticalSection.Leave();

    if (decRef)
    {
        DecrementReferenceCount();
    }
}
// Marks the thread terminating and signals any waiters. Returns true on
// the first call only (guarded by the _hasExited exchange), telling the
// caller to decrement the reference count exactly once.
private bool ExitImpl()
{
    KernelContext.CriticalSection.Enter();

    SetNewSchedFlags(ThreadSchedState.TerminationPending);

    bool decRef = Interlocked.Exchange(ref _hasExited, 1) == 0;

    Signal();

    KernelContext.CriticalSection.Leave();

    return decRef;
}
  324. private int GetEffectiveRunningCore()
  325. {
  326. for (int coreNumber = 0; coreNumber < KScheduler.CpuCoresCount; coreNumber++)
  327. {
  328. if (KernelContext.Schedulers[coreNumber].CurrentThread == this)
  329. {
  330. return coreNumber;
  331. }
  332. }
  333. return -1;
  334. }
  335. public KernelResult Sleep(long timeout)
  336. {
  337. KernelContext.CriticalSection.Enter();
  338. if (ShallBeTerminated || SchedFlags == ThreadSchedState.TerminationPending)
  339. {
  340. KernelContext.CriticalSection.Leave();
  341. return KernelResult.ThreadTerminating;
  342. }
  343. SetNewSchedFlags(ThreadSchedState.Paused);
  344. if (timeout > 0)
  345. {
  346. KernelContext.TimeManager.ScheduleFutureInvocation(this, timeout);
  347. }
  348. KernelContext.CriticalSection.Leave();
  349. if (timeout > 0)
  350. {
  351. KernelContext.TimeManager.UnscheduleFutureInvocation(this);
  352. }
  353. return 0;
  354. }
  355. public void SetPriority(int priority)
  356. {
  357. KernelContext.CriticalSection.Enter();
  358. if (IsPinned)
  359. {
  360. _originalBasePriority = priority;
  361. }
  362. else
  363. {
  364. BasePriority = priority;
  365. }
  366. UpdatePriorityInheritance();
  367. KernelContext.CriticalSection.Leave();
  368. }
// Sets the given force-pause flag(s) and merges them into SchedFlags.
// NOTE(review): no critical section is taken here; SetActivity calls this
// while holding it — confirm all other callers do the same.
public void Suspend(ThreadSchedState type)
{
    _forcePauseFlags |= type;

    CombineForcePauseFlags();
}
// Clears the given force-pause flag(s). If no other force-pause flags
// remain, the high (pause) bits are stripped from SchedFlags so the
// thread becomes schedulable again.
// NOTE(review): like Suspend, assumes the caller holds the kernel
// critical section — confirm against callers.
public void Resume(ThreadSchedState type)
{
    ThreadSchedState oldForcePauseFlags = _forcePauseFlags;

    _forcePauseFlags &= ~type;

    if ((oldForcePauseFlags & ~type) == ThreadSchedState.None)
    {
        ThreadSchedState oldSchedFlags = SchedFlags;

        SchedFlags &= ThreadSchedState.LowMask;

        AdjustScheduling(oldSchedFlags);
    }
}
// Implements SVC SetThreadActivity: pauses (pause == true) or resumes
// the thread via the thread pause flag. Returns InvalidState when the
// thread is not in a pausable/resumable state or the flag is already in
// the requested state. When pausing, waits until the target is no longer
// running on any core; if the target is pinned, the caller parks itself
// on the pinned waiters list instead of spinning.
public KernelResult SetActivity(bool pause)
{
    lock (_activityOperationLock)
    {
        KernelResult result = KernelResult.Success;

        KernelContext.CriticalSection.Enter();

        ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;

        if (lowNibble != ThreadSchedState.Paused && lowNibble != ThreadSchedState.Running)
        {
            KernelContext.CriticalSection.Leave();

            return KernelResult.InvalidState;
        }

        if (!ShallBeTerminated && SchedFlags != ThreadSchedState.TerminationPending)
        {
            if (pause)
            {
                // Pause, the force pause flag should be clear (thread is NOT paused).
                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
                {
                    Suspend(ThreadSchedState.ThreadPauseFlag);
                }
                else
                {
                    result = KernelResult.InvalidState;
                }
            }
            else
            {
                // Unpause, the force pause flag should be set (thread is paused).
                if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) != 0)
                {
                    Resume(ThreadSchedState.ThreadPauseFlag);
                }
                else
                {
                    result = KernelResult.InvalidState;
                }
            }
        }

        KernelContext.CriticalSection.Leave();

        if (result == KernelResult.Success && pause)
        {
            // Wait for the target to actually stop running.
            bool isThreadRunning = true;

            while (isThreadRunning)
            {
                KernelContext.CriticalSection.Enter();

                if (TerminationRequested)
                {
                    KernelContext.CriticalSection.Leave();

                    break;
                }

                isThreadRunning = false;

                if (IsPinned)
                {
                    // Target is pinned: park the calling thread until the
                    // target is unpinned (it will be resumed from the
                    // pinned waiters list).
                    KThread currentThread = KernelStatic.GetCurrentThread();

                    if (currentThread.TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();

                        result = KernelResult.ThreadTerminating;

                        break;
                    }

                    _pinnedWaiters.AddLast(currentThread);

                    currentThread.Reschedule(ThreadSchedState.Paused);
                }
                else
                {
                    // Loop again while the target still runs on some core.
                    isThreadRunning = GetEffectiveRunningCore() >= 0;
                }

                KernelContext.CriticalSection.Leave();
            }
        }

        return result;
    }
}
// Implements SVC GetThreadContext3: copies the guest thread context.
// The target must be paused via the thread pause flag, otherwise
// InvalidState is returned. If the thread is terminating, the output is
// left zeroed but Success is still returned.
public KernelResult GetThreadContext3(out ThreadContext context)
{
    context = default;

    lock (_activityOperationLock)
    {
        KernelContext.CriticalSection.Enter();

        if ((_forcePauseFlags & ThreadSchedState.ThreadPauseFlag) == 0)
        {
            KernelContext.CriticalSection.Leave();

            return KernelResult.InvalidState;
        }

        if (!TerminationRequested)
        {
            context = GetCurrentContext();
        }

        KernelContext.CriticalSection.Leave();
    }

    return KernelResult.Success;
}
  478. private static uint GetPsr(IExecutionContext context)
  479. {
  480. return context.Pstate & 0xFF0FFE20;
  481. }
// Builds a ThreadContext snapshot from the current CPU state. 64-bit
// processes get every register the context exposes plus FP/LR/SP; 32-bit
// processes get R0-R14 and 16 FPU registers, with values truncated to
// 32 bits where applicable.
private ThreadContext GetCurrentContext()
{
    const int MaxRegistersAArch32 = 15;
    const int MaxFpuRegistersAArch32 = 16;

    ThreadContext context = new ThreadContext();

    if (Owner.Flags.HasFlag(ProcessCreationFlags.Is64Bit))
    {
        for (int i = 0; i < context.Registers.Length; i++)
        {
            context.Registers[i] = Context.GetX(i);
        }

        for (int i = 0; i < context.FpuRegisters.Length; i++)
        {
            context.FpuRegisters[i] = Context.GetV(i);
        }

        context.Fp = Context.GetX(29);
        context.Lr = Context.GetX(30);
        context.Sp = Context.GetX(31);
        context.Pc = Context.Pc;
        context.Pstate = GetPsr(Context);
        context.Tpidr = (ulong)Context.TpidrroEl0;
    }
    else
    {
        for (int i = 0; i < MaxRegistersAArch32; i++)
        {
            context.Registers[i] = (uint)Context.GetX(i);
        }

        for (int i = 0; i < MaxFpuRegistersAArch32; i++)
        {
            context.FpuRegisters[i] = Context.GetV(i);
        }

        context.Pc = (uint)Context.Pc;
        context.Pstate = GetPsr(Context);
        context.Tpidr = (uint)Context.TpidrroEl0;
    }

    // FPCR/FPSR are captured for both modes.
    context.Fpcr = (uint)Context.Fpcr;
    context.Fpsr = (uint)Context.Fpsr;

    return context;
}
// Implements SVC CancelSynchronization. If the thread is not currently
// blocked in a synchronization wait, the cancellation is recorded in
// SyncCancelled so the next wait observes it. If it is blocked, the
// thread is woken: either removed from the withholder list, or resumed
// with a Cancelled result.
public void CancelSynchronization()
{
    KernelContext.CriticalSection.Enter();

    if ((SchedFlags & ThreadSchedState.LowMask) != ThreadSchedState.Paused || !WaitingSync)
    {
        // Not blocked in a sync wait right now; flag for later.
        SyncCancelled = true;
    }
    else if (Withholder != null)
    {
        Withholder.Remove(WithholderNode);

        SetNewSchedFlags(ThreadSchedState.Running);

        Withholder = null;

        SyncCancelled = true;
    }
    else
    {
        // Actively waiting: fail the wait with Cancelled and resume.
        SignaledObj = null;
        ObjSyncResult = KernelResult.Cancelled;

        SetNewSchedFlags(ThreadSchedState.Running);

        SyncCancelled = false;
    }

    KernelContext.CriticalSection.Leave();
}
// Implements SVC SetThreadCoreMask: updates the preferred core and core
// affinity mask. While core migration is disabled the new values are
// only stashed (_original*) and applied later. After updating, waits for
// the thread to migrate off any core the new mask no longer allows; if
// the target is pinned, the caller parks itself on the pinned waiters
// list instead of spinning. Returns InvalidCombination when the
// preferred core is not in the new mask, ThreadTerminating if the caller
// is terminating during the wait, Success otherwise.
public KernelResult SetCoreAndAffinityMask(int newCore, ulong newAffinityMask)
{
    lock (_activityOperationLock)
    {
        KernelContext.CriticalSection.Enter();

        bool isCoreMigrationDisabled = _coreMigrationDisableCount != 0;

        // The value -3 is "do not change the preferred core".
        if (newCore == -3)
        {
            newCore = isCoreMigrationDisabled ? _originalPreferredCore : PreferredCore;

            if ((newAffinityMask & (1UL << newCore)) == 0)
            {
                KernelContext.CriticalSection.Leave();

                return KernelResult.InvalidCombination;
            }
        }

        if (isCoreMigrationDisabled)
        {
            // Stash; applied when core migration is re-enabled.
            _originalPreferredCore = newCore;
            _originalAffinityMask = newAffinityMask;
        }
        else
        {
            ulong oldAffinityMask = AffinityMask;

            PreferredCore = newCore;
            AffinityMask = newAffinityMask;

            if (oldAffinityMask != newAffinityMask)
            {
                int oldCore = ActiveCore;

                // If the active core is no longer allowed, pick a new one:
                // the preferred core, or the highest allowed core when the
                // preferred core is a "don't care" negative value.
                if (oldCore >= 0 && ((AffinityMask >> oldCore) & 1) == 0)
                {
                    if (PreferredCore < 0)
                    {
                        // Index of the highest set bit of the mask.
                        ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount(AffinityMask);
                    }
                    else
                    {
                        ActiveCore = PreferredCore;
                    }
                }

                AdjustSchedulingForNewAffinity(oldAffinityMask, oldCore);
            }
        }

        KernelContext.CriticalSection.Leave();

        // Wait until the thread is no longer running on a disallowed core.
        bool targetThreadPinned = true;

        while (targetThreadPinned)
        {
            KernelContext.CriticalSection.Enter();

            if (TerminationRequested)
            {
                KernelContext.CriticalSection.Leave();

                break;
            }

            targetThreadPinned = false;

            int coreNumber = GetEffectiveRunningCore();

            bool isPinnedThreadCurrentlyRunning = coreNumber >= 0;

            if (isPinnedThreadCurrentlyRunning && ((1UL << coreNumber) & AffinityMask) == 0)
            {
                if (IsPinned)
                {
                    // Park the caller until the target is unpinned.
                    KThread currentThread = KernelStatic.GetCurrentThread();

                    if (currentThread.TerminationRequested)
                    {
                        KernelContext.CriticalSection.Leave();

                        return KernelResult.ThreadTerminating;
                    }

                    _pinnedWaiters.AddLast(currentThread);

                    currentThread.Reschedule(ThreadSchedState.Paused);
                }
                else
                {
                    targetThreadPinned = true;
                }
            }

            KernelContext.CriticalSection.Leave();
        }

        return KernelResult.Success;
    }
}
  624. private void CombineForcePauseFlags()
  625. {
  626. ThreadSchedState oldFlags = SchedFlags;
  627. ThreadSchedState lowNibble = SchedFlags & ThreadSchedState.LowMask;
  628. SchedFlags = lowNibble | (_forcePauseFlags & _forcePausePermissionFlags);
  629. AdjustScheduling(oldFlags);
  630. }
// Replaces the low (state) bits of SchedFlags with newFlags, keeping the
// high (pause) bits, and updates the scheduler only when the low state
// actually changed.
private void SetNewSchedFlags(ThreadSchedState newFlags)
{
    KernelContext.CriticalSection.Enter();

    ThreadSchedState oldFlags = SchedFlags;

    SchedFlags = (oldFlags & ThreadSchedState.HighMask) | newFlags;

    if ((oldFlags & ThreadSchedState.LowMask) != newFlags)
    {
        AdjustScheduling(oldFlags);
    }

    KernelContext.CriticalSection.Leave();
}
  642. public void ReleaseAndResume()
  643. {
  644. KernelContext.CriticalSection.Enter();
  645. if ((SchedFlags & ThreadSchedState.LowMask) == ThreadSchedState.Paused)
  646. {
  647. if (Withholder != null)
  648. {
  649. Withholder.Remove(WithholderNode);
  650. SetNewSchedFlags(ThreadSchedState.Running);
  651. Withholder = null;
  652. }
  653. else
  654. {
  655. SetNewSchedFlags(ThreadSchedState.Running);
  656. }
  657. }
  658. KernelContext.CriticalSection.Leave();
  659. }
  660. public void Reschedule(ThreadSchedState newFlags)
  661. {
  662. KernelContext.CriticalSection.Enter();
  663. ThreadSchedState oldFlags = SchedFlags;
  664. SchedFlags = (oldFlags & ThreadSchedState.HighMask) |
  665. (newFlags & ThreadSchedState.LowMask);
  666. AdjustScheduling(oldFlags);
  667. KernelContext.CriticalSection.Leave();
  668. }
// Registers "requester" as waiting on a mutex owned by this thread and
// recomputes priority inheritance so this thread can inherit the
// waiter's priority if it is higher.
public void AddMutexWaiter(KThread requester)
{
    AddToMutexWaitersList(requester);

    requester.MutexOwner = this;

    UpdatePriorityInheritance();
}
  675. public void RemoveMutexWaiter(KThread thread)
  676. {
  677. if (thread._mutexWaiterNode?.List != null)
  678. {
  679. _mutexWaiters.Remove(thread._mutexWaiterNode);
  680. }
  681. thread.MutexOwner = null;
  682. UpdatePriorityInheritance();
  683. }
// Transfers ownership of the user mutex at mutexAddress to the highest
// priority waiter (the waiters list is priority sorted, so the first
// match wins). All remaining waiters for the same address are re-linked
// onto the new owner's waiters list. "count" receives the number of
// threads that were waiting on that address; returns the new owner, or
// null when there were no waiters.
public KThread RelinquishMutex(ulong mutexAddress, out int count)
{
    count = 0;

    if (_mutexWaiters.First == null)
    {
        return null;
    }

    KThread newMutexOwner = null;

    LinkedListNode<KThread> currentNode = _mutexWaiters.First;

    do
    {
        // Skip all threads that are not waiting for this mutex.
        while (currentNode != null && currentNode.Value.MutexAddress != mutexAddress)
        {
            currentNode = currentNode.Next;
        }

        if (currentNode == null)
        {
            break;
        }

        // Capture the next node before removing the current one.
        LinkedListNode<KThread> nextNode = currentNode.Next;

        _mutexWaiters.Remove(currentNode);

        currentNode.Value.MutexOwner = newMutexOwner;

        if (newMutexOwner != null)
        {
            // New owner was already selected, re-insert on new owner list.
            newMutexOwner.AddToMutexWaitersList(currentNode.Value);
        }
        else
        {
            // New owner not selected yet, use current thread.
            newMutexOwner = currentNode.Value;
        }

        count++;

        currentNode = nextNode;
    }
    while (currentNode != null);

    if (newMutexOwner != null)
    {
        // Both the old and new owner may need new effective priorities.
        UpdatePriorityInheritance();

        newMutexOwner.UpdatePriorityInheritance();
    }

    return newMutexOwner;
}
// If any of the threads waiting for the mutex has
// higher priority than the current thread, then
// the current thread inherits that priority.
// (Lower numeric value = higher priority; the waiters list is priority
// sorted, so only the first waiter needs to be checked.)
private void UpdatePriorityInheritance()
{
    int highestPriority = BasePriority;

    if (_mutexWaiters.First != null)
    {
        int waitingDynamicPriority = _mutexWaiters.First.Value.DynamicPriority;

        if (waitingDynamicPriority < highestPriority)
        {
            highestPriority = waitingDynamicPriority;
        }
    }

    if (highestPriority != DynamicPriority)
    {
        int oldPriority = DynamicPriority;

        DynamicPriority = highestPriority;

        AdjustSchedulingForNewPriority(oldPriority);

        if (MutexOwner != null)
        {
            // Remove and re-insert to ensure proper sorting based on new priority.
            MutexOwner._mutexWaiters.Remove(_mutexWaiterNode);

            MutexOwner.AddToMutexWaitersList(this);

            // Propagate the change up the ownership chain.
            MutexOwner.UpdatePriorityInheritance();
        }
    }
}
  756. private void AddToMutexWaitersList(KThread thread)
  757. {
  758. LinkedListNode<KThread> nextPrio = _mutexWaiters.First;
  759. int currentPriority = thread.DynamicPriority;
  760. while (nextPrio != null && nextPrio.Value.DynamicPriority <= currentPriority)
  761. {
  762. nextPrio = nextPrio.Next;
  763. }
  764. if (nextPrio != null)
  765. {
  766. thread._mutexWaiterNode = _mutexWaiters.AddBefore(nextPrio, thread);
  767. }
  768. else
  769. {
  770. thread._mutexWaiterNode = _mutexWaiters.AddLast(thread);
  771. }
  772. }
// Updates the per-core priority queues (or, for non-schedulable threads,
// the host wait event) after SchedFlags changed from oldFlags. The
// thread is scheduled on its active core and "suggested" on every other
// core allowed by the affinity mask.
private void AdjustScheduling(ThreadSchedState oldFlags)
{
    if (oldFlags == SchedFlags)
    {
        return;
    }

    if (!IsSchedulable)
    {
        if (!_forcedUnschedulable)
        {
            // Ensure our thread is running and we have an event.
            StartHostThread();

            // If the thread is not schedulable, we want to just run or pause
            // it directly as we don't care about priority or the core it is
            // running on in this case.

            if (SchedFlags == ThreadSchedState.Running)
            {
                _schedulerWaitEvent.Set();
            }
            else
            {
                _schedulerWaitEvent.Reset();
            }
        }

        return;
    }

    if (oldFlags == ThreadSchedState.Running)
    {
        // Was running, now it's stopped.
        if (ActiveCore >= 0)
        {
            KernelContext.PriorityQueue.Unschedule(DynamicPriority, ActiveCore, this);
        }

        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
            {
                KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
            }
        }
    }
    else if (SchedFlags == ThreadSchedState.Running)
    {
        // Was stopped, now it's running.
        if (ActiveCore >= 0)
        {
            KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
        }

        for (int core = 0; core < KScheduler.CpuCoresCount; core++)
        {
            if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
            {
                KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
            }
        }
    }

    KernelContext.ThreadReselectionRequested = true;
}
// Moves the thread between priority queues after DynamicPriority
// changed. Only applies to schedulable, currently-running threads. When
// the thread adjusts its own priority it is prepended to the new queue
// (keeping it at the front); otherwise it is appended.
private void AdjustSchedulingForNewPriority(int oldPriority)
{
    if (SchedFlags != ThreadSchedState.Running || !IsSchedulable)
    {
        return;
    }

    // Remove thread from the old priority queues.
    if (ActiveCore >= 0)
    {
        KernelContext.PriorityQueue.Unschedule(oldPriority, ActiveCore, this);
    }

    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
        {
            KernelContext.PriorityQueue.Unsuggest(oldPriority, core, this);
        }
    }

    // Add thread to the new priority queues.
    KThread currentThread = KernelStatic.GetCurrentThread();

    if (ActiveCore >= 0)
    {
        if (currentThread == this)
        {
            KernelContext.PriorityQueue.SchedulePrepend(DynamicPriority, ActiveCore, this);
        }
        else
        {
            KernelContext.PriorityQueue.Schedule(DynamicPriority, ActiveCore, this);
        }
    }

    for (int core = 0; core < KScheduler.CpuCoresCount; core++)
    {
        if (core != ActiveCore && ((AffinityMask >> core) & 1) != 0)
        {
            KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
        }
    }

    KernelContext.ThreadReselectionRequested = true;
}
        /// <summary>
        /// Moves a running, schedulable thread between per-core queues after its
        /// affinity mask and/or active core changed.
        /// </summary>
        /// <param name="oldAffinityMask">Affinity mask before the change</param>
        /// <param name="oldCore">Active core before the change</param>
        private void AdjustSchedulingForNewAffinity(ulong oldAffinityMask, int oldCore)
        {
            // Skip threads that are not in the queues: not running, priority outside
            // the scheduled range, or not managed by the emulated scheduler.
            if (SchedFlags != ThreadSchedState.Running || DynamicPriority >= KScheduler.PrioritiesCount || !IsSchedulable)
            {
                return;
            }

            // Remove thread from the old priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((oldAffinityMask >> core) & 1) != 0)
                {
                    // Scheduled on its old active core, suggested everywhere else it was allowed.
                    if (core == oldCore)
                    {
                        KernelContext.PriorityQueue.Unschedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Unsuggest(DynamicPriority, core, this);
                    }
                }
            }

            // Add thread to the new priority queues.
            for (int core = 0; core < KScheduler.CpuCoresCount; core++)
            {
                if (((AffinityMask >> core) & 1) != 0)
                {
                    if (core == ActiveCore)
                    {
                        KernelContext.PriorityQueue.Schedule(DynamicPriority, core, this);
                    }
                    else
                    {
                        KernelContext.PriorityQueue.Suggest(DynamicPriority, core, this);
                    }
                }
            }

            // Queue contents changed, so the scheduler must re-select threads.
            KernelContext.ThreadReselectionRequested = true;
        }
  909. public void SetEntryArguments(long argsPtr, int threadHandle)
  910. {
  911. Context.SetX(0, (ulong)argsPtr);
  912. Context.SetX(1, (ulong)threadHandle);
  913. }
  914. public void TimeUp()
  915. {
  916. ReleaseAndResume();
  917. }
  918. public string GetGuestStackTrace()
  919. {
  920. return Owner.Debugger.GetGuestStackTrace(this);
  921. }
  922. public string GetGuestRegisterPrintout()
  923. {
  924. return Owner.Debugger.GetCpuRegisterPrintout(this);
  925. }
  926. public void PrintGuestStackTrace()
  927. {
  928. Logger.Info?.Print(LogClass.Cpu, $"Guest stack trace:\n{GetGuestStackTrace()}\n");
  929. }
  930. public void PrintGuestRegisterPrintout()
  931. {
  932. Logger.Info?.Print(LogClass.Cpu, $"Guest CPU registers:\n{GetGuestRegisterPrintout()}\n");
  933. }
  934. public void AddCpuTime(long ticks)
  935. {
  936. Interlocked.Add(ref _totalTimeRunning, ticks);
  937. }
        /// <summary>
        /// Lazily creates the scheduler wait event and starts the host thread backing this
        /// guest thread. Safe to call repeatedly and concurrently: only the caller that
        /// wins the event-install race starts the host thread.
        /// </summary>
        public void StartHostThread()
        {
            if (_schedulerWaitEvent == null)
            {
                // Created unsignaled so the host thread blocks in ThreadStart until
                // the scheduler sets the event.
                var schedulerWaitEvent = new ManualResetEvent(false);

                // Exchange arbitrates concurrent callers: whoever installs the event
                // first starts the host thread; losers dispose their redundant event.
                if (Interlocked.Exchange(ref _schedulerWaitEvent, schedulerWaitEvent) == null)
                {
                    HostThread.Start();
                }
                else
                {
                    schedulerWaitEvent.Dispose();
                }
            }
        }
        /// <summary>
        /// Host thread entry point. Blocks until the scheduler first allows the thread to
        /// run, then executes either the custom start routine or the guest entrypoint, and
        /// finally tears down the CPU context and scheduler event.
        /// </summary>
        private void ThreadStart()
        {
            // Wait for the scheduler to signal us runnable for the first time.
            _schedulerWaitEvent.WaitOne();
            KernelStatic.SetKernelContext(KernelContext, this);

            // Kernel-internal threads supply a custom start routine; guest threads
            // execute the guest code at _entrypoint.
            if (_customThreadStart != null)
            {
                _customThreadStart();
            }
            else
            {
                Owner.Context.Execute(Context, _entrypoint);
            }

            // Execution finished: release the CPU context and the scheduler event.
            Context.Dispose();
            _schedulerWaitEvent.Dispose();
        }
  968. public void MakeUnschedulable()
  969. {
  970. _forcedUnschedulable = true;
  971. }
  972. public override bool IsSignaled()
  973. {
  974. return _hasExited != 0;
  975. }
        /// <summary>
        /// Final cleanup when the thread object's reference count reaches zero: frees the
        /// thread's resources and returns its slot to the appropriate resource limit.
        /// </summary>
        protected override void Destroy()
        {
            if (_hasBeenInitialized)
            {
                FreeResources();

                // NOTE(review): for threads with an owner this is always true regardless
                // of _hasBeenReleased, so the extra release count of 1 only applies to
                // ownerless threads that were never released — confirm this is intended.
                bool released = Owner != null || _hasBeenReleased;

                if (Owner != null)
                {
                    Owner.ResourceLimit?.Release(LimitableResource.Thread, 1, released ? 0 : 1);

                    Owner.DecrementReferenceCount();
                }
                else
                {
                    // Ownerless (kernel) threads count against the system resource limit.
                    KernelContext.ResourceLimit.Release(LimitableResource.Thread, 1, released ? 0 : 1);
                }
            }
        }
        /// <summary>
        /// Releases the thread's per-process resources: removes it from the owner process,
        /// frees its TLS slot, and wakes every thread still waiting on a mutex it holds.
        /// </summary>
        private void FreeResources()
        {
            Owner?.RemoveThread(this);

            // NOTE(review): Owner is dereferenced without a null check here, unlike the
            // line above — presumably _tlsAddress is only non-zero for threads that have
            // an owner process; confirm against the initialization path.
            if (_tlsAddress != 0 && Owner.FreeThreadLocalStorage(_tlsAddress) != KernelResult.Success)
            {
                throw new InvalidOperationException("Unexpected failure freeing thread local storage.");
            }

            KernelContext.CriticalSection.Enter();

            // Wake up all threads that may be waiting for a mutex being held by this thread.
            // They resume with InvalidState since the mutex owner is gone.
            foreach (KThread thread in _mutexWaiters)
            {
                thread.MutexOwner = null;
                thread._originalPreferredCore = 0;
                thread.ObjSyncResult = KernelResult.InvalidState;

                thread.ReleaseAndResume();
            }

            KernelContext.CriticalSection.Leave();

            Owner?.DecrementThreadCountAndTerminateIfZero();
        }
        /// <summary>
        /// Pins the thread to the core it is currently running on: saves its scheduling
        /// parameters for <c>Unpin</c>, restricts affinity to the current core, clamps its
        /// base priority to the process' allowed range, and disallows pausing.
        /// </summary>
        public void Pin()
        {
            IsPinned = true;
            _coreMigrationDisableCount++;

            int activeCore = ActiveCore;

            // Save scheduling parameters so Unpin can restore them later.
            _originalPreferredCore = PreferredCore;
            _originalAffinityMask = AffinityMask;

            // Restrict the thread to the core it is pinned on.
            ActiveCore = CurrentCore;
            PreferredCore = CurrentCore;
            AffinityMask = 1UL << CurrentCore;

            if (activeCore != CurrentCore || _originalAffinityMask != AffinityMask)
            {
                AdjustSchedulingForNewAffinity(_originalAffinityMask, activeCore);
            }

            _originalBasePriority = BasePriority;

            // Clamp base priority using the process capability mask.
            // NOTE(review): assumes the lowest set bit of AllowedThreadPriosMask marks the
            // most favoured allowed priority — confirm against KProcessCapabilities.
            BasePriority = Math.Min(_originalBasePriority, BitOperations.TrailingZeroCount(Owner.Capabilities.AllowedThreadPriosMask) - 1);
            UpdatePriorityInheritance();

            // Disallows thread pausing
            _forcePausePermissionFlags &= ~ThreadSchedState.ThreadPauseFlag;
            CombineForcePauseFlags();

            // TODO: Assign reduced SVC permissions
        }
        /// <summary>
        /// Reverses <c>Pin</c>: restores the saved preferred core, affinity mask, and base
        /// priority, re-allows pausing (unless termination was requested), and wakes all
        /// threads waiting for this thread to be unpinned.
        /// </summary>
        public void Unpin()
        {
            IsPinned = false;
            _coreMigrationDisableCount--;

            ulong affinityMask = AffinityMask;
            int activeCore = ActiveCore;

            PreferredCore = _originalPreferredCore;
            AffinityMask = _originalAffinityMask;

            if (AffinityMask != affinityMask)
            {
                // NOTE(review): this migrates when the current core IS allowed by the
                // restored mask; kernels typically migrate only when it is NOT allowed —
                // confirm the != 0 condition is intended.
                if ((AffinityMask & 1UL << ActiveCore) != 0)
                {
                    if (PreferredCore >= 0)
                    {
                        ActiveCore = PreferredCore;
                    }
                    else
                    {
                        // No preferred core: pick the highest-numbered core set in the mask.
                        ActiveCore = sizeof(ulong) * 8 - 1 - BitOperations.LeadingZeroCount((ulong)AffinityMask);
                    }

                    AdjustSchedulingForNewAffinity(affinityMask, activeCore);
                }
            }

            BasePriority = _originalBasePriority;
            UpdatePriorityInheritance();

            if (!TerminationRequested)
            {
                // Allows thread pausing
                _forcePausePermissionFlags |= ThreadSchedState.ThreadPauseFlag;
                CombineForcePauseFlags();

                // TODO: Restore SVC permissions
            }

            // Wake up waiters
            foreach (KThread waiter in _pinnedWaiters)
            {
                waiter.ReleaseAndResume();
            }

            _pinnedWaiters.Clear();
        }
        /// <summary>
        /// If this thread is the one pinned on the current core, clears its user interrupt
        /// flag and unpins it. The whole check-and-unpin runs inside the kernel critical
        /// section to stay consistent with the scheduler.
        /// </summary>
        public void SynchronizePreemptionState()
        {
            KernelContext.CriticalSection.Enter();

            if (Owner != null && Owner.PinnedThreads[CurrentCore] == this)
            {
                // Acknowledge the interrupt before releasing the pin.
                ClearUserInterruptFlag();

                Owner.UnpinThread(this);
            }

            KernelContext.CriticalSection.Leave();
        }
  1083. public ushort GetUserDisableCount()
  1084. {
  1085. return Owner.CpuMemory.Read<ushort>(_tlsAddress + TlsUserDisableCountOffset);
  1086. }
  1087. public void SetUserInterruptFlag()
  1088. {
  1089. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 1);
  1090. }
  1091. public void ClearUserInterruptFlag()
  1092. {
  1093. Owner.CpuMemory.Write<ushort>(_tlsAddress + TlsUserInterruptFlagOffset, 0);
  1094. }
  1095. }
  1096. }