[NTOS:CC][NTOS:MM] Add back CcRosTrimCache and add Delay for MM to work. (#5630)

MM/CC: Add back CcRosTrimCache, as suggested by Thomas Faber, which was removed in 0.4.15-dev-1717-gd8cdb89fb0,
and also call it once in a while during read operations.

fixes JIRA issue: CORE-17624 'Cannot copy files > RAMsize anymore using TotalCommander'


1st testbot results on top of 0.4.15-dev-6526-g8d35887
VBox: https://reactos.org/testman/compare.php?ids=89111,89120 (additional random reboot in winhttp:winhttp)
KVM: https://reactos.org/testman/compare.php?ids=89110,89119
We assume that reboot is unrelated.

2nd testbot results on top of 0.4.15-dev-6526-g8d35887
VBox: https://reactos.org/testman/compare.php?ids=89111,89232
KVM: https://reactos.org/testman/compare.php?ids=89110,89233
This commit is contained in:
Doug Lyons 2023-09-06 06:34:25 -05:00 committed by GitHub
parent 289dec6c39
commit 2b14056600
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 141 additions and 0 deletions

View File

@ -449,6 +449,124 @@ CcRosFlushDirtyPages (
return STATUS_SUCCESS;
}
VOID
CcRosTrimCache(
    _In_ ULONG Target,
    _Out_ PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;       /* VACBs unlinked under the lock; released after the lock is dropped */
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;  /* Ensures the flush-and-retry path runs at most once */

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    /* The master lock protects the global VACB LRU list during the whole scan */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Walk the LRU list front-to-back, i.e. least-recently-used entries first */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        /* Advance before we potentially remove 'current' from the list */
        current_entry = current_entry->Flink;

        /* Per-cache-map lock, taken while still holding the master lock */
        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now: Refs < 2 means only the
         * list's own reference remains, so nobody else is using it */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            /* Defer the final release until after the locks are dropped */
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            /* NOTE(review): the count is capped at the remaining Target, so
             * *NrFreed may under-report when a whole VACB exceeds what was
             * asked for; the scan also continues even once Target reaches 0
             * — confirm this is intentional */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now that no spinlocks are held, drop the last reference on each
     * unlinked VACB, which must free it (asserted below) */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));
}
NTSTATUS
CcRosReleaseVacb (
PROS_SHARED_CACHE_MAP SharedCacheMap,

View File

@ -316,6 +316,15 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
PPFN_NUMBER AllocatedPage)
{
PFN_NUMBER Page;
static INT i = 0;
static LARGE_INTEGER TinyTime = {{-1L, -1L}};
/* Delay some requests for the Memory Manager to recover pages */
if (i++ >= 100)
{
KeDelayExecutionThread(KernelMode, FALSE, &TinyTime);
i = 0;
}
/*
* Actually allocate the page.
@ -335,6 +344,10 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
return(STATUS_SUCCESS);
}
/* Forward declaration — presumably implemented by the cache controller
 * (Cc); declared here so the balancer can trim the file cache.
 * TODO(review): consider moving this prototype to a shared Cc header. */
VOID
CcRosTrimCache(
    _In_ ULONG Target,
    _Out_ PULONG NrFreed);
VOID NTAPI
MiBalancerThread(PVOID Unused)
@ -361,6 +374,8 @@ MiBalancerThread(PVOID Unused)
if (Status == STATUS_WAIT_0 || Status == STATUS_WAIT_1)
{
ULONG InitialTarget = 0;
ULONG Target;
ULONG NrFreedPages;
do
{
@ -372,6 +387,14 @@ MiBalancerThread(PVOID Unused)
InitialTarget = MiTrimMemoryConsumer(i, InitialTarget);
}
/* Trim cache */
Target = max(InitialTarget, abs(MiMinimumAvailablePages - MmAvailablePages));
if (Target)
{
CcRosTrimCache(Target, &NrFreedPages);
InitialTarget -= min(NrFreedPages, InitialTarget);
}
/* No pages left to swap! */
if (InitialTarget != 0 &&
InitialTarget == OldTarget)