[NTOSKRNL] Drop the ViewLock mutex in favour of the master spin lock
This will allow Cc calls during DPC, which is required by MS FastFAT. CORE-11819
This commit is contained in: parent b67dbdbea5, commit 4f8b041bf0
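The conversion is mostly mechanical: every KeAcquireGuardedMutex(&ViewLock) / KeReleaseGuardedMutex(&ViewLock) pair becomes an acquire/release of the kernel's queued master spin lock. A guarded mutex can only be taken at IRQL <= APC_LEVEL, whereas the queued spin lock raises the caller to DISPATCH_LEVEL, which is what makes these paths callable from a DPC. A minimal sketch of the before/after pattern follows; it assumes the ntoskrnl-internal declarations of KeAcquireQueuedSpinLock, KeReleaseQueuedSpinLock and LockQueueMasterLock are in scope (they are not part of the public driver headers), and the Ccp-prefixed helper is purely illustrative, not code from this commit.

    #include <ntifs.h>

    /* Old pattern (pre-commit), legal only at IRQL <= APC_LEVEL:
     *
     *     KeAcquireGuardedMutex(&ViewLock);
     *     ... walk a Cc global list ...
     *     KeReleaseGuardedMutex(&ViewLock);
     *
     * New pattern: the queued master spin lock raises to DISPATCH_LEVEL,
     * so the same walk may also be entered from a DPC. */
    static BOOLEAN
    CcpExampleListHasEntries(PLIST_ENTRY ListHead)
    {
        KIRQL OldIrql;
        BOOLEAN NonEmpty;

        /* The acquire returns the previous IRQL... */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        NonEmpty = !IsListEmpty(ListHead);
        /* ...and the release must be handed that value back. */
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        return NonEmpty;
    }

The saved KIRQL takes over the bookkeeping role the guarded mutex used to hide: it travels from the acquire to the matching release, which is why several hunks below add a KIRQL local.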
@@ -15,8 +15,6 @@
 /* GLOBALS *****************************************************************/
 
-extern KGUARDED_MUTEX ViewLock;
-
 NTSTATUS CcRosInternalFreeVacb(PROS_VACB Vacb);
 
 /* FUNCTIONS *****************************************************************/
@@ -115,12 +113,13 @@ CcIsThereDirtyData (
 {
     PROS_VACB Vacb;
     PLIST_ENTRY Entry;
+    KIRQL oldIrql;
     /* Assume no dirty data */
     BOOLEAN Dirty = FALSE;
 
     CCTRACE(CC_API_DEBUG, "Vpb=%p\n", Vpb);
 
-    KeAcquireGuardedMutex(&ViewLock);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     /* Browse dirty VACBs */
     for (Entry = DirtyVacbListHead.Flink; Entry != &DirtyVacbListHead; Entry = Entry->Flink)
@@ -148,7 +147,7 @@ CcIsThereDirtyData (
         }
     }
 
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
     return Dirty;
 }
@@ -202,8 +201,8 @@ CcPurgeCacheSection (
     /* Assume success */
     Success = TRUE;
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
     ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
     while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
     {
@@ -246,8 +245,8 @@ CcPurgeCacheSection (
         RemoveEntryList(&Vacb->CacheMapVacbListEntry);
         InsertHeadList(&FreeList, &Vacb->CacheMapVacbListEntry);
     }
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
     while (!IsListEmpty(&FreeList))
     {
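Where ViewLock used to nest with a shared cache map's CacheMapLock, the nesting is kept, but the inner lock switches to the AtDpcLevel/FromDpcLevel variants: the outer queued lock has already raised IRQL to DISPATCH_LEVEL, so the inner spin lock neither raises nor restores it, and only the outer release returns to the saved IRQL. A sketch of the resulting shape, using a cut-down stand-in for ROS_SHARED_CACHE_MAP (both the type and the helper are illustrative assumptions, not code from the commit):

    #include <ntifs.h>

    /* Reduced stand-in for ROS_SHARED_CACHE_MAP: just the two fields the
     * locking pattern needs. */
    typedef struct _EXAMPLE_SHARED_CACHE_MAP
    {
        KSPIN_LOCK CacheMapLock;
        LIST_ENTRY CacheMapVacbListHead;
    } EXAMPLE_SHARED_CACHE_MAP, *PEXAMPLE_SHARED_CACHE_MAP;

    static VOID
    CcpExampleWalkMap(PEXAMPLE_SHARED_CACHE_MAP SharedCacheMap)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;

        /* Outer lock: raises to DISPATCH_LEVEL and records the old IRQL. */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        /* Inner lock: already at DISPATCH_LEVEL, so the cheap AtDpcLevel
         * variant replaces KeAcquireSpinLock. */
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        for (ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
             ListEntry != &SharedCacheMap->CacheMapVacbListHead;
             ListEntry = ListEntry->Flink)
        {
            /* per-VACB work elided */
        }

        /* Release in reverse order; only the outer release restores IRQL. */
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }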
@@ -45,8 +45,6 @@
 LIST_ENTRY DirtyVacbListHead;
 static LIST_ENTRY VacbLruListHead;
 
-KGUARDED_MUTEX ViewLock;
-
 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
@@ -135,8 +133,8 @@ CcRosTraceCacheMap (
     {
         DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
 
-        KeAcquireGuardedMutex(&ViewLock);
-        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
+        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
 
         current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
         while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
@@ -147,8 +145,9 @@ CcRosTraceCacheMap (
             DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                     current, current->ReferenceCount, current->Dirty, current->PageOut );
         }
-        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
-        KeReleaseGuardedMutex(&ViewLock);
+
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
     }
     else
     {
@@ -191,13 +190,14 @@ CcRosFlushDirtyPages (
     PROS_VACB current;
     BOOLEAN Locked;
     NTSTATUS Status;
+    KIRQL OldIrql;
 
     DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
 
     (*Count) = 0;
 
     KeEnterCriticalRegion();
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     current_entry = DirtyVacbListHead.Flink;
     if (current_entry == &DirtyVacbListHead)
@@ -232,14 +232,14 @@ CcRosFlushDirtyPages (
 
         ASSERT(current->Dirty);
 
-        KeReleaseGuardedMutex(&ViewLock);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
         Status = CcRosFlushVacb(current);
 
         current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
             current->SharedCacheMap->LazyWriteContext);
 
-        KeAcquireGuardedMutex(&ViewLock);
+        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
         CcRosVacbDecRefCount(current);
 
         if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
@@ -270,7 +270,7 @@ CcRosFlushDirtyPages (
         current_entry = DirtyVacbListHead.Flink;
     }
 
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
     KeLeaveCriticalRegion();
 
     DPRINT("CcRosFlushDirtyPages() finished\n");
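CcRosFlushDirtyPages shows the other half of the discipline: a spin lock held at DISPATCH_LEVEL cannot be held across CcRosFlushVacb, which may block on I/O, so the master lock is dropped before the flush and reacquired afterwards (and the dirty list is re-read from its head once the lock is back). A reduced sketch of that drop/flush/reacquire sequence; EXAMPLE_VACB and ExampleFlushVacb are hypothetical stand-ins for the real VACB and flush routine.

    #include <ntifs.h>

    typedef struct _EXAMPLE_VACB
    {
        LIST_ENTRY DirtyVacbListEntry;
    } EXAMPLE_VACB, *PEXAMPLE_VACB;

    /* Stand-in for CcRosFlushVacb: in reality this may block, so it must
     * run with no spin lock held. */
    static NTSTATUS
    ExampleFlushVacb(PEXAMPLE_VACB Vacb)
    {
        UNREFERENCED_PARAMETER(Vacb);
        return STATUS_SUCCESS;
    }

    static NTSTATUS
    CcpExampleFlushFirstDirty(PLIST_ENTRY DirtyListHead)
    {
        KIRQL OldIrql;
        PEXAMPLE_VACB Vacb;
        NTSTATUS Status = STATUS_SUCCESS;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        if (!IsListEmpty(DirtyListHead))
        {
            Vacb = CONTAINING_RECORD(DirtyListHead->Flink,
                                     EXAMPLE_VACB,
                                     DirtyVacbListEntry);

            /* Drop the lock for the duration of the (blocking) flush... */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

            Status = ExampleFlushVacb(Vacb);

            /* ...and reacquire it before touching the list again; the list
             * may have changed in the meantime. */
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        return Status;
    }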
@@ -307,7 +307,7 @@ CcRosTrimCache (
     *NrFreed = 0;
 
 retry:
-    KeAcquireGuardedMutex(&ViewLock);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     current_entry = VacbLruListHead.Flink;
     while (current_entry != &VacbLruListHead)
@@ -319,7 +319,7 @@ retry:
                                     VacbLruListEntry);
         current_entry = current_entry->Flink;
 
-        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
+        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
 
         /* Reference the VACB */
         CcRosVacbIncRefCount(current);
@@ -328,8 +328,8 @@ retry:
         if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
         {
             /* We have to break these locks because Cc sucks */
-            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
-            KeReleaseGuardedMutex(&ViewLock);
+            KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
             /* Page out the VACB */
             for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
@@ -340,8 +340,8 @@ retry:
             }
 
             /* Reacquire the locks */
-            KeAcquireGuardedMutex(&ViewLock);
-            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
+            oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+            KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
         }
 
         /* Dereference the VACB */
@@ -365,10 +365,10 @@ retry:
             (*NrFreed) += PagesFreed;
         }
 
-        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
+        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
     }
 
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
     /* Try flushing pages if we haven't met our target */
     if ((Target > 0) && !FlushedPages)
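CcRosTrimCache applies the same idea to the nested pair: when a mapped VACB has to be paged out, both locks are released, inner first, the page-out runs with no spin lock held, and the pair is reacquired in the original outer-then-inner order before the LRU walk continues; that is what the "We have to break these locks" comment refers to. A sketch of that break-and-reacquire step, with the CacheMapLock passed in directly for brevity (the helper and its parameters are illustrative only):

    #include <ntifs.h>

    static VOID
    CcpExampleBreakAndReacquire(PKSPIN_LOCK CacheMapLock)
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(CacheMapLock);

        /* ... decide under both locks that the VACB must be paged out ... */

        /* Break the locks, innermost first: the page-out cannot run at
         * DISPATCH_LEVEL. */
        KeReleaseSpinLockFromDpcLevel(CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        /* ... page-out work elided ... */

        /* Reacquire in the same outer-then-inner order before continuing
         * to walk the per-map VACB list. */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(CacheMapLock);

        KeReleaseSpinLockFromDpcLevel(CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }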
@@ -459,8 +459,8 @@ CcRosLookupVacb (
     DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
            SharedCacheMap, FileOffset);
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
 
     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
@@ -473,8 +473,8 @@ CcRosLookupVacb (
                                FileOffset))
         {
             CcRosVacbIncRefCount(current);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-            KeReleaseGuardedMutex(&ViewLock);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
             return current;
         }
         if (current->FileOffset.QuadPart > FileOffset)
@@ -482,8 +482,8 @@ CcRosLookupVacb (
         current_entry = current_entry->Flink;
     }
 
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
     return NULL;
 }
@@ -498,8 +498,8 @@ CcRosMarkDirtyVacb (
 
     SharedCacheMap = Vacb->SharedCacheMap;
 
-    KeAcquireGuardedMutex(&ViewLock);
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
 
     ASSERT(!Vacb->Dirty);
 
@@ -514,11 +514,9 @@ CcRosMarkDirtyVacb (
 
     Vacb->Dirty = TRUE;
 
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
     /* Schedule a lazy writer run to now that we have dirty VACB */
-    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     if (!LazyWriter.ScanActive)
     {
         CcScheduleLazyWriteScan(FALSE);
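In CcRosMarkDirtyVacb the switch also removes a redundant round trip: the old code released ViewLock and then immediately acquired the master queued spin lock anyway, just to test LazyWriter.ScanActive. With the master lock now held for the whole body, only the inner CacheMapLock is dropped and the lazy-writer check runs under the lock that is already owned. A sketch of the new shape; ExampleLazyWriter and ExampleScheduleLazyWriteScan stand in for the real LazyWriter state and CcScheduleLazyWriteScan.

    #include <ntifs.h>

    typedef struct _EXAMPLE_LAZY_WRITER
    {
        BOOLEAN ScanActive;
    } EXAMPLE_LAZY_WRITER;

    static EXAMPLE_LAZY_WRITER ExampleLazyWriter;

    static VOID
    ExampleScheduleLazyWriteScan(VOID)
    {
        ExampleLazyWriter.ScanActive = TRUE;
    }

    static VOID
    CcpExampleMarkDirty(PKSPIN_LOCK CacheMapLock, PBOOLEAN Dirty)
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(CacheMapLock);

        *Dirty = TRUE;

        /* Only the inner lock is dropped here; before this commit the code
         * released ViewLock and then had to take the master lock again just
         * to look at the lazy-writer state. */
        KeReleaseSpinLockFromDpcLevel(CacheMapLock);

        if (!ExampleLazyWriter.ScanActive)
        {
            ExampleScheduleLazyWriteScan();
        }

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }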
@@ -539,8 +537,8 @@ CcRosUnmarkDirtyVacb (
 
     if (LockViews)
     {
-        KeAcquireGuardedMutex(&ViewLock);
-        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
     }
 
     ASSERT(Vacb->Dirty);
@@ -555,8 +553,8 @@ CcRosUnmarkDirtyVacb (
 
     if (LockViews)
     {
-        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
-        KeReleaseGuardedMutex(&ViewLock);
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
     }
 }
@@ -702,7 +700,7 @@ CcRosFreeUnusedVacb (
     Freed = FALSE;
     InitializeListHead(&FreeList);
 
-    KeAcquireGuardedMutex(&ViewLock);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     /* Browse all the available VACB */
     current_entry = VacbLruListHead.Flink;
@@ -715,7 +713,7 @@ CcRosFreeUnusedVacb (
                                     VacbLruListEntry);
         current_entry = current_entry->Flink;
 
-        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
+        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
 
         /* Only deal with unused VACB, we will free them */
         Refs = CcRosVacbGetRefCount(current);
@@ -732,11 +730,11 @@ CcRosFreeUnusedVacb (
             InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
         }
 
-        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
+        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
 
     }
 
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
     /* And now, free any of the found VACB, that'll free memory! */
     while (!IsListEmpty(&FreeList))
@@ -837,7 +835,7 @@ Retry:
         return Status;
     }
 
-    KeAcquireGuardedMutex(&ViewLock);
+    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     *Vacb = current;
     /* There is window between the call to CcRosLookupVacb
@@ -845,7 +843,7 @@ Retry:
      * file offset exist. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
-    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
     current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
     previous = NULL;
     while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
@@ -858,7 +856,7 @@ Retry:
                                FileOffset))
         {
             CcRosVacbIncRefCount(current);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 #if DBG
             if (SharedCacheMap->Trace)
             {
@@ -868,7 +866,7 @@ Retry:
                         current);
             }
 #endif
-            KeReleaseGuardedMutex(&ViewLock);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
             Refs = CcRosVacbDecRefCount(*Vacb);
             ASSERT(Refs == 0);
@@ -896,9 +894,9 @@ Retry:
     {
         InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
     }
-    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
 
     MI_SET_USAGE(MI_USAGE_CACHE);
 #if MI_TRACE_PFNS
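The Retry path above also shows why the re-check and the insert have to happen under a single acquisition of the master lock: while no lock was held, another thread may have created a VACB for the same file offset, in which case the newcomer is released and the existing one returned. A simplified, self-contained version of that double-check; EXAMPLE_ENTRY and CcpExampleInsertUnlessPresent are hypothetical, and the real code additionally keeps the list ordered by offset and manages reference counts.

    #include <ntifs.h>

    typedef struct _EXAMPLE_ENTRY
    {
        LIST_ENTRY Links;
        ULONGLONG FileOffset;
    } EXAMPLE_ENTRY, *PEXAMPLE_ENTRY;

    /* Returns TRUE if NewEntry was published, FALSE if an existing entry for
     * the same offset won the race (the caller then frees NewEntry). */
    static BOOLEAN
    CcpExampleInsertUnlessPresent(PLIST_ENTRY ListHead,
                                  PKSPIN_LOCK ListLock,
                                  PEXAMPLE_ENTRY NewEntry)
    {
        KIRQL OldIrql;
        PLIST_ENTRY Iter;
        PEXAMPLE_ENTRY Existing;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(ListLock);

        for (Iter = ListHead->Flink; Iter != ListHead; Iter = Iter->Flink)
        {
            Existing = CONTAINING_RECORD(Iter, EXAMPLE_ENTRY, Links);
            if (Existing->FileOffset == NewEntry->FileOffset)
            {
                /* Lost the race: release inner then outer lock and bail. */
                KeReleaseSpinLockFromDpcLevel(ListLock);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
                return FALSE;
            }
        }

        InsertTailList(ListHead, &NewEntry->Links);

        KeReleaseSpinLockFromDpcLevel(ListLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        return TRUE;
    }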
@@ -938,6 +936,7 @@ CcRosGetVacb (
     PROS_VACB current;
     NTSTATUS Status;
     ULONG Refs;
+    KIRQL OldIrql;
 
     ASSERT(SharedCacheMap);
 
@@ -961,13 +960,13 @@ CcRosGetVacb (
 
     Refs = CcRosVacbGetRefCount(current);
 
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     /* Move to the tail of the LRU list */
     RemoveEntryList(&current->VacbLruListEntry);
     InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
 
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
     /*
      * Return information about the VACB to the caller.
@@ -1150,7 +1149,8 @@ NTSTATUS
 NTAPI
 CcRosDeleteFileCache (
     PFILE_OBJECT FileObject,
-    PROS_SHARED_CACHE_MAP SharedCacheMap)
+    PROS_SHARED_CACHE_MAP SharedCacheMap,
+    PKIRQL OldIrql)
 /*
  * FUNCTION: Releases the shared cache map associated with a file object
  */
@@ -1158,41 +1158,38 @@ CcRosDeleteFileCache (
     PLIST_ENTRY current_entry;
     PROS_VACB current;
     LIST_ENTRY FreeList;
-    KIRQL oldIrql;
 
     ASSERT(SharedCacheMap);
 
     SharedCacheMap->OpenCount++;
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
 
     CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
 
-    KeAcquireGuardedMutex(&ViewLock);
+    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     SharedCacheMap->OpenCount--;
     if (SharedCacheMap->OpenCount == 0)
     {
-        KIRQL OldIrql;
-
         FileObject->SectionObjectPointer->SharedCacheMap = NULL;
 
         /*
          * Release all VACBs
         */
        InitializeListHead(&FreeList);
-        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
-                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+                KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
                CcRosUnmarkDirtyVacb(current, FALSE);
-                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+                KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
@@ -1203,14 +1200,14 @@ CcRosDeleteFileCache (
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
 
-            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
+            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        }
 #if DBG
        SharedCacheMap->Trace = FALSE;
 #endif
-        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
-        KeReleaseGuardedMutex(&ViewLock);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
        ObDereferenceObject(SharedCacheMap->FileObject);
 
        while (!IsListEmpty(&FreeList))
@@ -1249,12 +1246,12 @@ CcRosDeleteFileCache (
 #endif
        }
 
-        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
-        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
 
        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
-        KeAcquireGuardedMutex(&ViewLock);
+        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     }
     return STATUS_SUCCESS;
 }
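CcRosDeleteFileCache changes its contract rather than just its lock calls: the caller now enters with the master queued spin lock held and hands the saved IRQL down by pointer, and the function drops and retakes that same lock around its blocking work (CcFlushCache, tearing down and freeing the VACBs), leaving the lock held again on return. A sketch of both sides of that convention; the Example-prefixed routines are stand-ins, not the real functions.

    #include <ntifs.h>

    /* Callee side: entered with the master lock held, *OldIrql valid. */
    static NTSTATUS
    ExampleDeleteFileCache(PKIRQL OldIrql)
    {
        /* Drop the caller's lock before anything that may block. */
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

        /* ... CcFlushCache / VACB teardown elided ... */

        /* Reacquire before returning so the caller's release stays balanced. */
        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        return STATUS_SUCCESS;
    }

    /* Caller side, shaped like CcRosRemoveIfClosed after this commit. */
    static VOID
    ExampleRemoveIfClosed(VOID)
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

        /* The callee may drop and retake the lock internally, but on return
         * it is held again and OldIrql is still the IRQL to restore. */
        ExampleDeleteFileCache(&OldIrql);

        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }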
@@ -1265,12 +1262,14 @@ CcRosReferenceCache (
     PFILE_OBJECT FileObject)
 {
     PROS_SHARED_CACHE_MAP SharedCacheMap;
-    KeAcquireGuardedMutex(&ViewLock);
+    KIRQL OldIrql;
+
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
     ASSERT(SharedCacheMap);
     ASSERT(SharedCacheMap->OpenCount != 0);
     SharedCacheMap->OpenCount++;
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 }
 
 VOID
@@ -1279,14 +1278,16 @@ CcRosRemoveIfClosed (
     PSECTION_OBJECT_POINTERS SectionObjectPointer)
 {
     PROS_SHARED_CACHE_MAP SharedCacheMap;
+    KIRQL OldIrql;
 
     DPRINT("CcRosRemoveIfClosed()\n");
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     SharedCacheMap = SectionObjectPointer->SharedCacheMap;
     if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
     {
-        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
+        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
     }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 }
 
@@ -1296,7 +1297,9 @@ CcRosDereferenceCache (
     PFILE_OBJECT FileObject)
 {
     PROS_SHARED_CACHE_MAP SharedCacheMap;
-    KeAcquireGuardedMutex(&ViewLock);
+    KIRQL OldIrql;
+
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
     ASSERT(SharedCacheMap);
     if (SharedCacheMap->OpenCount > 0)
@@ -1304,11 +1307,17 @@ CcRosDereferenceCache (
         SharedCacheMap->OpenCount--;
         if (SharedCacheMap->OpenCount == 0)
         {
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
             MmFreeSectionSegments(SharedCacheMap->FileObject);
-            CcRosDeleteFileCache(FileObject, SharedCacheMap);
+
+            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+            CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+            return;
         }
     }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 }
 
 NTSTATUS
@@ -1324,7 +1333,7 @@ CcRosReleaseFileCache (
     PPRIVATE_CACHE_MAP PrivateMap;
     PROS_SHARED_CACHE_MAP SharedCacheMap;
 
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
 
     if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
     {
@@ -1335,17 +1344,15 @@ CcRosReleaseFileCache (
          * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
-        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
-        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
-            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
-            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
@@ -1362,13 +1369,19 @@ CcRosReleaseFileCache (
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
+                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
-                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
+
+                    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
+                    CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
+                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
+
+                    return STATUS_SUCCESS;
                }
            }
        }
    }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
     return STATUS_SUCCESS;
 }
 
@@ -1417,7 +1430,7 @@ CcRosInitializeFileCache (
         InitializeListHead(&SharedCacheMap->BcbList);
     }
 
-    KeAcquireGuardedMutex(&ViewLock);
+    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
     if (Allocated)
     {
         if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
@@ -1428,9 +1441,7 @@ CcRosInitializeFileCache (
                                            KernelMode);
             FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
 
-            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
             InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
-            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
         }
         else
         {
@@ -1457,16 +1468,14 @@ CcRosInitializeFileCache (
             /* If we also allocated the shared cache map for this file, kill it */
             if (Allocated)
             {
-                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
-                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
                 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                 ObDereferenceObject(FileObject);
                 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
             }
 
-            KeReleaseGuardedMutex(&ViewLock);
+            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
             return STATUS_INSUFFICIENT_RESOURCES;
         }
 
@@ -1478,14 +1487,14 @@ CcRosInitializeFileCache (
         KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
 
         /* Link it to the file */
-        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
+        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
         InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
-        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
+        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
 
         FileObject->PrivateCacheMap = PrivateMap;
         SharedCacheMap->OpenCount++;
     }
-    KeReleaseGuardedMutex(&ViewLock);
+    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
 
     return STATUS_SUCCESS;
 }
@@ -1524,7 +1533,6 @@ CcInitView (
     InitializeListHead(&CcDeferredWrites);
     InitializeListHead(&CcCleanSharedCacheMapList);
     KeInitializeSpinLock(&CcDeferredWriteSpinLock);
-    KeInitializeGuardedMutex(&ViewLock);
     ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                     NULL,
                                     NULL,