diff --git "a/ntoskrnl/cc/view.c" "b/ntoskrnl/cc/view.c" index efa4c8523d4..7c13afb3220 100644 --- "a/ntoskrnl/cc/view.c" +++ "b/ntoskrnl/cc/view.c" @@ -75,13 +75,16 @@ ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line) ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line) { ULONG Refs; + BOOLEAN VacbDirty = vacb->Dirty; + BOOLEAN VacbTrace = vacb->SharedCacheMap->Trace; + BOOLEAN VacbPageOut = vacb->PageOut; Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount); - ASSERT(!(Refs == 0 && vacb->Dirty)); - if (vacb->SharedCacheMap->Trace) + ASSERT(!(Refs == 0 && VacbDirty)); + if (VacbTrace) { DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n", - file, line, vacb, Refs, vacb->Dirty, vacb->PageOut); + file, line, vacb, Refs, VacbDirty, VacbPageOut); } if (Refs == 0) @@ -466,8 +469,6 @@ CcRosTrimCache( ULONG PagesFreed; KIRQL oldIrql; LIST_ENTRY FreeList; - PFN_NUMBER Page; - ULONG i; BOOLEAN FlushedPages = FALSE; DPRINT("CcRosTrimCache(Target %lu)\n", Target); @@ -487,7 +488,6 @@ retry: current = CONTAINING_RECORD(current_entry, ROS_VACB, VacbLruListEntry); - current_entry = current_entry->Flink; KeAcquireSpinLockAtDpcLevel(¤t->SharedCacheMap->CacheMapLock); @@ -497,6 +497,18 @@ retry: /* Check if it's mapped and not dirty */ if (InterlockedCompareExchange((PLONG)¤t->MappedCount, 0, 0) > 0 && !current->Dirty) { + /* This code is never executed. It is left for reference only. 
*/ +#if 1 + DPRINT1("MmPageOutPhysicalAddress unexpectedly called\n"); + ASSERT(FALSE); +#else + ULONG i; + PFN_NUMBER Page; + + /* We have to break these locks to call MmPageOutPhysicalAddress */ + KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock); + KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql); + /* Page out the VACB */ for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++) { @@ -504,8 +516,16 @@ retry: MmPageOutPhysicalAddress(Page); } + + /* Reacquire the locks */ + oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock); +#endif } + /* Only keep iterating through the loop while the lock is held */ + current_entry = current_entry->Flink; + /* Dereference the VACB */ Refs = CcRosVacbDecRefCount(current); @@ -1122,6 +1142,8 @@ CcFlushCache ( IoStatus->Information = 0; } + KeAcquireGuardedMutex(&SharedCacheMap->FlushCacheLock); + /* * We flush the VACBs that we find here. * If there is no (dirty) VACB, it doesn't mean that there is no data to flush, so we call Mm to be sure. 
@@ -1140,7 +1162,8 @@ CcFlushCache ( Status = CcRosFlushVacb(vacb, &VacbIosb); if (!NT_SUCCESS(Status)) { - goto quit; + CcRosReleaseVacb(SharedCacheMap, vacb, FALSE, FALSE); + break; } DirtyVacb = TRUE; @@ -1170,7 +1193,7 @@ CcFlushCache ( } if (!NT_SUCCESS(Status)) - goto quit; + break; if (IoStatus) IoStatus->Information += MmIosb.Information; @@ -1190,6 +1213,8 @@ CcFlushCache ( FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY; } + KeReleaseGuardedMutex(&SharedCacheMap->FlushCacheLock); + quit: if (IoStatus) { @@ -1298,6 +1323,7 @@ CcRosInitializeFileCache ( KeInitializeSpinLock(&SharedCacheMap->CacheMapLock); InitializeListHead(&SharedCacheMap->CacheMapVacbListHead); InitializeListHead(&SharedCacheMap->BcbList); + KeInitializeGuardedMutex(&SharedCacheMap->FlushCacheLock); SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION; diff --git "a/ntoskrnl/include/internal/cc.h" "b/ntoskrnl/include/internal/cc.h" index 90c315901d1..b37c5478f05 100644 --- "a/ntoskrnl/include/internal/cc.h" +++ "b/ntoskrnl/include/internal/cc.h" @@ -193,6 +193,7 @@ typedef struct _ROS_SHARED_CACHE_MAP LIST_ENTRY CacheMapVacbListHead; BOOLEAN PinAccess; KSPIN_LOCK CacheMapLock; + KGUARDED_MUTEX FlushCacheLock; #if DBG BOOLEAN Trace; /* enable extra trace output for this cache map and it's VACBs */ #endif