/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date, then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
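/*
 * Illustrative sketch only (kept out of the build): one possible shape of a
 * filesystem read dispatch following steps (1)-(5) above. The EXAMPLE_FCB
 * layout, FsExampleReadFromDisk, and the exact CcInitializeFileCache/
 * CcRequestCachePage/CcReleaseCachePage signatures are hypothetical
 * placeholders, not the real interface exported by this file.
 */
#if 0
static NTSTATUS
FsExampleCachedRead(
    PEXAMPLE_FCB Fcb,    /* hypothetical FCB type */
    LONGLONG FileOffset,
    ULONG Length,
    PVOID Buffer)
{
    NTSTATUS Status;
    PVOID CachePage;
    BOOLEAN UpToDate;

    /* (1) Make sure caching is initialized for this FCB */
    if (!Fcb->CacheInitialized)
        CcInitializeFileCache(Fcb->FileObject, &Fcb->FileSizes);

    /* (2) Walk the range one 4k region at a time */
    while (Length > 0)
    {
        ULONG PageOffset = FileOffset % PAGE_SIZE;
        ULONG CopyLength = min(Length, PAGE_SIZE - PageOffset);

        CcRequestCachePage(Fcb, FileOffset, &CachePage, &UpToDate);

        /* (3) Bring the page up to date from the medium if needed */
        if (!UpToDate)
        {
            Status = FsExampleReadFromDisk(Fcb, FileOffset, CachePage);
            if (!NT_SUCCESS(Status))
            {
                CcReleaseCachePage(Fcb, CachePage, FALSE /* VALID */);
                return Status;
            }
        }

        /* (4) Copy out of the page */
        RtlCopyMemory(Buffer, (PUCHAR)CachePage + PageOffset, CopyLength);

        /* (5) Release the cache page */
        CcReleaseCachePage(Fcb, CachePage, TRUE /* VALID */);

        FileOffset += CopyLength;
        Buffer = (PUCHAR)Buffer + CopyLength;
        Length -= CopyLength;
    }

    return STATUS_SUCCESS;
}
#endif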
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

#if DBG
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}

ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}

ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut);
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
NTAPI
MmFlushVirtualMemory(IN PEPROCESS Process,
                     IN OUT PVOID *BaseAddress,
                     IN OUT PSIZE_T RegionSize,
                     OUT PIO_STATUS_BLOCK IoStatusBlock);

NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    IO_STATUS_BLOCK Iosb;
    SIZE_T FlushSize = min(VACB_MAPPING_GRANULARITY,
                           Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    NTSTATUS Status;

    CcRosUnmarkDirtyVacb(Vacb, TRUE);

    Status = MmFlushVirtualMemory(NULL, &Vacb->BaseAddress, &FlushSize, &Iosb);
    if (!NT_SUCCESS(Status))
        CcRosMarkDirtyVacb(Vacb);

    return Status;
}

NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    KIRQL OldIrql;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            DPRINT("Not locked!");
            ASSERT(!Wait);
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            CcRosVacbDecRefCount(current);
            continue;
        }

        Status = CcRosFlushVacb(current);

        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        /* We release the VACB before acquiring the lock again, because
         * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a
         * Refcount. Freeing must be done outside of the lock.
         * The refcount is decremented atomically. So this is OK. */
        CcRosVacbDecRefCount(current);
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            if (!Wait)
            {
                /* Make sure we don't overflow target! */
                if (Target < PagesFreed)
                {
                    /* If we would have, jump to zero directly */
                    Target = 0;
                }
                else
                {
                    Target -= PagesFreed;
                }
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
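/*
 * Illustrative sketch only (kept out of the build): how a lazy-writer-style
 * caller might drive CcRosFlushDirtyPages(). The pass size of 64 pages is an
 * arbitrary example value, not the tuning the real lazy writer uses.
 */
#if 0
static VOID
ExampleLazyWritePass(VOID)
{
    ULONG Flushed;

    /* Try to write back up to 64 dirty pages without blocking; temporary
     * files and write-behind-disabled files are skipped because we pass
     * CalledFromLazy = TRUE. */
    CcRosFlushDirtyPages(64, &Flushed, FALSE, TRUE);
    DPRINT("Lazy pass wrote back %lu pages\n", Flushed);
}
#endif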
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}

/* Returns with VACB Lock Held! */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}

VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    ULONG Length = VACB_MAPPING_GRANULARITY;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
#if 0
    if (Vacb->FileOffset.QuadPart + Length > SharedCacheMap->SectionSize.QuadPart)
        Length = SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
#endif
    CcTotalDirtyPages += PAGE_ROUND_UP(Length) / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += PAGE_ROUND_UP(Length) / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Tell Mm */
    MmMakePagesDirty(NULL, Vacb->BaseAddress, Length);
}

VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    ULONG Length = VACB_MAPPING_GRANULARITY;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);
#if 0
    if (Vacb->FileOffset.QuadPart + Length > SharedCacheMap->SectionSize.QuadPart)
        Length = SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
#endif
    CcTotalDirtyPages -= PAGE_ROUND_UP(Length) / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= PAGE_ROUND_UP(Length) / PAGE_SIZE;
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}
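/*
 * Accounting note (illustrative, assuming the usual 256 kB view granularity
 * and 4 kB pages): every mark/unmark above charges a whole view, i.e.
 * PAGE_ROUND_UP(VACB_MAPPING_GRANULARITY) / PAGE_SIZE = 262144 / 4096 = 64
 * pages, even when the file tail covers only part of the view (the disabled
 * "#if 0" clamping would make the charge exact).
 */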
static
BOOLEAN
CcRosFreeUnusedVacb (
    PULONG Count)
{
    ULONG cFreed;
    BOOLEAN Freed;
    KIRQL oldIrql;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    PLIST_ENTRY current_entry;

    cFreed = 0;
    Freed = FALSE;
    InitializeListHead(&FreeList);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACB */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACB, we will free them */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset and move to free list */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free any of the found VACB, that'll free memory! */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);

        ++cFreed;
    }

    /* If we freed at least one VACB, return success */
    if (cFreed != 0)
    {
        Freed = TRUE;
    }

    /* If caller asked for free count, return it */
    if (Count != NULL)
    {
        *Count = cFreed;
    }

    return Freed;
}
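/*
 * Reference-count convention used above (derived from the asserts): a VACB
 * sitting idle on the cache-map/LRU lists holds exactly one reference, so
 * "Refs < 2" means no caller is currently using it and it is safe to
 * reclaim; the final CcRosVacbDecRefCount() drops that list reference and
 * frees the view.
 */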
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    BOOLEAN Retried;
    SIZE_T ViewSize = VACB_MAPPING_GRANULARITY;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    Retried = FALSE;
Retry:
    /* Map VACB in system space */
    Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section,
                                      &current->BaseAddress,
                                      &ViewSize,
                                      &current->FileOffset);
    if (!NT_SUCCESS(Status))
    {
        ULONG Freed;
        /* If no space left, try to prune unused VACB
         * to recover space to map our VACB.
         * If that succeeds, retry the mapping; otherwise
         * just fail. */
        if (!Retried && CcRosFreeUnusedVacb(&Freed))
        {
            DPRINT("Pruned %lu VACB, trying again\n", Freed);
            Retried = TRUE;
            goto Retry;
        }

        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
        return Status;
    }

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check whether a VACB for this
     * file offset already exists. If there is one, we release
     * our newly created VACB and return the existing one. */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    return Status;
}
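/*
 * Offset arithmetic (illustrative, assuming a 256 kB VACB_MAPPING_GRANULARITY):
 * a request at file offset 0x48352 is served by the view based at
 * ROUND_DOWN(0x48352, 0x40000) = 0x40000, and the data lives at byte
 * 0x48352 - 0x40000 = 0x8352 inside that view's BaseAddress.
 */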
BOOLEAN
NTAPI
CcRosEnsureVacbResident(
    _In_ PROS_VACB Vacb,
    _In_ BOOLEAN Wait,
    _In_ BOOLEAN NoRead,
    _In_ ULONG Offset,
    _In_ ULONG Length
)
{
    PVOID BaseAddress;

    ASSERT((Offset + Length) <= VACB_MAPPING_GRANULARITY);

    if ((Vacb->FileOffset.QuadPart + Offset) > Vacb->SharedCacheMap->FileSize.QuadPart)
        return FALSE;

    BaseAddress = (PVOID)((ULONG_PTR)Vacb->BaseAddress + Offset);

    /* Check if the pages are resident */
    if (!MmArePagesResident(NULL, BaseAddress, Length))
    {
        if (!Wait)
        {
            return FALSE;
        }

        if (!NoRead)
        {
            NTSTATUS Status = MmMakePagesResident(NULL, BaseAddress, Length);
            if (!NT_SUCCESS(Status))
                ExRaiseStatus(Status);
        }
    }

    return TRUE;
}

NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return the VACB to the caller.
     */
    *Vacb = current;

    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        Vacb);
}
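/*
 * Illustrative sketch only (kept out of the build): a minimal cached-read
 * path built from the helpers above. It assumes FileOffset is view-aligned
 * and that [Offset, Offset + Length) stays within a single view.
 */
#if 0
static NTSTATUS
ExampleCopyFromCache(
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset, /* must be VACB_MAPPING_GRANULARITY aligned */
    ULONG Offset,        /* offset within the view */
    ULONG Length,
    PVOID Buffer)
{
    PROS_VACB Vacb;
    NTSTATUS Status;

    Status = CcRosRequestVacb(SharedCacheMap, FileOffset, &Vacb);
    if (!NT_SUCCESS(Status))
        return Status;

    /* Fault the backing pages in (blocking), reading from disk if needed */
    if (!CcRosEnsureVacbResident(Vacb, TRUE, FALSE, Offset, Length))
    {
        CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
        return STATUS_UNSUCCESSFUL;
    }

    RtlCopyMemory(Buffer, (PUCHAR)Vacb->BaseAddress + Offset, Length);

    /* Valid, not dirtied, not mapped for user access */
    CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
    return STATUS_SUCCESS;
}
#endif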
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    NTSTATUS Status;

    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Delete the mapping */
    Status = MmUnmapViewInSystemSpace(Vacb->BaseAddress);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Failed to unmap VACB from System address space! Status 0x%08X\n", Status);
        ASSERT(FALSE);
        /* Proceed with the deletion anyway */
    }

    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=0x%I64X Length=%lu\n",
        SectionObjectPointers, FileOffset ? FileOffset->QuadPart : 0LL, Length);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, FALSE, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
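/*
 * Illustrative sketch only (kept out of the build): a filesystem flushing a
 * whole cached file, e.g. while servicing IRP_MJ_FLUSH_BUFFERS. Passing a
 * NULL FileOffset flushes from offset 0 up to the cached file size, as the
 * implementation above shows.
 */
#if 0
static NTSTATUS
ExampleFlushWholeFile(PFILE_OBJECT FileObject)
{
    IO_STATUS_BLOCK Iosb;

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, &Iosb);
    return Iosb.Status;
}
#endif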
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;

    ASSERT(SharedCacheMap);

    SharedCacheMap->OpenCount++;
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

        if (SharedCacheMap->Section)
            ObDereferenceObject(SharedCacheMap->Section);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            Refs = CcRosVacbDecRefCount(current);
#if DBG // CORE-14578
            if (Refs != 0)
            {
                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
                DPRINT1("There are: %d references left\n", Refs);
                DPRINT1("Map: %d\n", current->MappedCount);
                DPRINT1("Dirty: %d\n", current->Dirty);
                if (FileObject->FileName.Length != 0)
                {
                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
                }
                else if (FileObject->FsContext != NULL &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                         ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
                {
                    DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
                }
                else
                {
                    DPRINT1("No name for the file\n");
                }
            }
#else
            ASSERT(Refs == 0);
#endif
        }

        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);

        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }
    return STATUS_SUCCESS;
}
VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount != 0);
    SharedCacheMap->OpenCount++;
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;

    DPRINT("CcRosRemoveIfClosed()\n");

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
    {
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing! */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            ASSERT(SharedCacheMap->OpenCount > 0);

            SharedCacheMap->OpenCount--;
            if (SharedCacheMap->OpenCount == 0)
            {
                CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    Allocated = FALSE;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            /* Drop the master lock before bailing out */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        InitializeListHead(&SharedCacheMap->BcbList);

        SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION;

        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);

        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        //CcRosTraceCacheMap(SharedCacheMap, TRUE);
    }
    else if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_CREATION)
    {
        /* The shared cache map is being created somewhere else. Wait for that to happen */
        KEVENT Waiter;
        PKEVENT PreviousWaiter = SharedCacheMap->CreateEvent;

        KeInitializeEvent(&Waiter, NotificationEvent, FALSE);
        SharedCacheMap->CreateEvent = &Waiter;

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        KeWaitForSingleObject(&Waiter, Executive, KernelMode, FALSE, NULL);

        if (PreviousWaiter)
            KeSetEvent(PreviousWaiter, IO_NO_INCREMENT, FALSE);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }

    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Create the section */
    if (Allocated)
    {
        NTSTATUS Status;

        ASSERT(SharedCacheMap->Section == NULL);

        Status = MmCreateSection(
            &SharedCacheMap->Section,
            SECTION_ALL_ACCESS,
            NULL,
            &SharedCacheMap->SectionSize,
            PAGE_READWRITE,
            0,
            NULL,
            FileObject);

        ASSERT(NT_SUCCESS(Status));

        if (!NT_SUCCESS(Status))
        {
            CcRosReleaseFileCache(FileObject);
            return Status;
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_CREATION;

        if (SharedCacheMap->CreateEvent)
        {
            KeSetEvent(SharedCacheMap->CreateEvent, IO_NO_INCREMENT, FALSE);
            SharedCacheMap->CreateEvent = NULL;
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}
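/*
 * Illustrative sketch only (kept out of the build): how a filesystem ends up
 * in CcRosInitializeFileCache(). Drivers call the documented
 * CcInitializeCacheMap() API with sizes taken from their FCB; the EXAMPLE_FCB
 * layout shown here is a hypothetical placeholder.
 */
#if 0
static VOID
ExampleInitCaching(PFILE_OBJECT FileObject, PEXAMPLE_FCB Fcb)
{
    CC_FILE_SIZES FileSizes;

    FileSizes.AllocationSize = Fcb->AllocationSize;
    FileSizes.FileSize = Fcb->FileSize;
    FileSizes.ValidDataLength = Fcb->ValidDataLength;

    /* No pin access; the callbacks synchronize against lazy write and
     * read ahead on this FCB */
    CcInitializeCacheMap(FileObject,
                         &FileSizes,
                         FALSE,
                         &Fcb->CacheManagerCallbacks,
                         Fcb);
}
#endif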
CODE_SEG("INIT")
VOID
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)

BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint("  Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Valid = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;
        PWSTR Extra = L"";

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACB */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            PROS_VACB Vacb;

            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
            if (Vacb->Valid)
            {
                Valid += VACB_MAPPING_GRANULARITY / 1024;
            }
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else if (SharedCacheMap->FileObject != NULL &&
                 SharedCacheMap->FileObject->FsContext != NULL &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
        {
            FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
            Extra = L" (FastFAT)";
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
    }

    return TRUE;
}

BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}

#endif

/* EOF */