diff --git a/ntoskrnl/cc/view.c b/ntoskrnl/cc/view.c
index 0e6917ea18a..efa4c8523d4 100644
--- a/ntoskrnl/cc/view.c
+++ b/ntoskrnl/cc/view.c
@@ -449,6 +449,124 @@ CcRosFlushDirtyPages (
     return STATUS_SUCCESS;
 }
 
+VOID
+CcRosTrimCache(
+    _In_ ULONG Target,
+    _Out_ PULONG NrFreed)
+/*
+ * FUNCTION: Try to free some memory from the file cache.
+ * ARGUMENTS:
+ *       Target - The number of pages to be freed.
+ *       NrFreed - Points to a variable where the number of pages
+ *                 actually freed is returned.
+ */
+{
+    PLIST_ENTRY current_entry;
+    PROS_VACB current;
+    ULONG PagesFreed;
+    KIRQL oldIrql;
+    LIST_ENTRY FreeList;
+    PFN_NUMBER Page;
+    ULONG i;
+    BOOLEAN FlushedPages = FALSE;
+
+    DPRINT("CcRosTrimCache(Target %lu)\n", Target);
+
+    InitializeListHead(&FreeList);
+
+    *NrFreed = 0;
+
+retry:
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+
+    current_entry = VacbLruListHead.Flink;
+    while (current_entry != &VacbLruListHead)
+    {
+        ULONG Refs;
+
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    VacbLruListEntry);
+        current_entry = current_entry->Flink;
+
+        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
+
+        /* Reference the VACB */
+        CcRosVacbIncRefCount(current);
+
+        /* Check if it's mapped and not dirty */
+        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
+        {
+            /* Page out the VACB */
+            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
+            {
+                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
+
+                MmPageOutPhysicalAddress(Page);
+            }
+        }
+
+        /* Dereference the VACB */
+        Refs = CcRosVacbDecRefCount(current);
+
+        /* Check if we can free this entry now */
+        if (Refs < 2)
+        {
+            ASSERT(!current->Dirty);
+            ASSERT(!current->MappedCount);
+            ASSERT(Refs == 1);
+
+            RemoveEntryList(&current->CacheMapVacbListEntry);
+            RemoveEntryList(&current->VacbLruListEntry);
+            InitializeListHead(&current->VacbLruListEntry);
+            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
+
+            /* Calculate how many pages we freed for Mm */
+            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
+            Target -= PagesFreed;
+            (*NrFreed) += PagesFreed;
+        }
+
+        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
+    }
+
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
+
+    /* Try flushing pages if we haven't met our target */
+    if ((Target > 0) && !FlushedPages)
+    {
+        /* Flush dirty pages to disk */
+        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
+        FlushedPages = TRUE;
+
+        /* We can only swap as many pages as we flushed */
+        if (PagesFreed < Target) Target = PagesFreed;
+
+        /* Check if we flushed anything */
+        if (PagesFreed != 0)
+        {
+            /* Try again after flushing dirty pages */
+            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
+            goto retry;
+        }
+    }
+
+    while (!IsListEmpty(&FreeList))
+    {
+        ULONG Refs;
+
+        current_entry = RemoveHeadList(&FreeList);
+        current = CONTAINING_RECORD(current_entry,
+                                    ROS_VACB,
+                                    CacheMapVacbListEntry);
+        InitializeListHead(&current->CacheMapVacbListEntry);
+        Refs = CcRosVacbDecRefCount(current);
+        ASSERT(Refs == 0);
+    }
+
+    DPRINT("Evicted %lu cache pages\n", (*NrFreed));
+}
+
 NTSTATUS
 CcRosReleaseVacb (
     PROS_SHARED_CACHE_MAP SharedCacheMap,
diff --git a/ntoskrnl/mm/balance.c b/ntoskrnl/mm/balance.c
index ab71d0f195b..f33189dc247 100644
--- a/ntoskrnl/mm/balance.c
+++ b/ntoskrnl/mm/balance.c
@@ -316,6 +316,15 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
                             PPFN_NUMBER AllocatedPage)
 {
     PFN_NUMBER Page;
+    static INT i = 0;
+    static LARGE_INTEGER TinyTime = {{-1L, -1L}};
+
+    /* Delay some requests for the Memory Manager to recover pages */
+    if (i++ >= 100)
+    {
+        KeDelayExecutionThread(KernelMode, FALSE, &TinyTime);
+        i = 0;
+    }
 
     /*
      * Actually allocate the page.
@@ -335,6 +344,10 @@ MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
     return(STATUS_SUCCESS);
 }
 
+VOID
+CcRosTrimCache(
+    _In_ ULONG Target,
+    _Out_ PULONG NrFreed);
 VOID NTAPI
 MiBalancerThread(PVOID Unused)
 {
@@ -361,6 +374,8 @@ MiBalancerThread(PVOID Unused)
         if (Status == STATUS_WAIT_0 || Status == STATUS_WAIT_1)
         {
             ULONG InitialTarget = 0;
+            ULONG Target;
+            ULONG NrFreedPages;
 
             do
             {
@@ -372,6 +387,14 @@ MiBalancerThread(PVOID Unused)
                     InitialTarget = MiTrimMemoryConsumer(i, InitialTarget);
                 }
 
+                /* Trim cache */
+                Target = max(InitialTarget, abs(MiMinimumAvailablePages - MmAvailablePages));
+                if (Target)
+                {
+                    CcRosTrimCache(Target, &NrFreedPages);
+                    InitialTarget -= min(NrFreedPages, InitialTarget);
+                }
+
                 /* No pages left to swap! */
                 if (InitialTarget != 0 && InitialTarget == OldTarget)