diff --git a/ntoskrnl/cc/view.c b/ntoskrnl/cc/view.c index 7c13afb3220..b48246ef8a3 100644 --- a/ntoskrnl/cc/view.c +++ b/ntoskrnl/cc/view.c @@ -304,6 +304,96 @@ CcRosDeleteFileCache ( return STATUS_SUCCESS; } +static +BOOLEAN +CcpAcquireCacheMapForFlush2( + _In_ PROS_SHARED_CACHE_MAP SharedCacheMap, + _In_ BOOLEAN Wait, + _Inout_ PKIRQL OldIrql) +{ + LARGE_INTEGER Timeout; + NTSTATUS Status; + + /* Keep a reference on the shared cache map */ + ASSERT(SharedCacheMap->OpenCount > 0); + SharedCacheMap->OpenCount++; + + KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql); + + Timeout.QuadPart = 0; + Status = KeWaitForSingleObject(&SharedCacheMap->FlushEvent, + Executive, + KernelMode, + FALSE, + Wait ? NULL : &Timeout); + + *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + + if (Status == STATUS_TIMEOUT) + { + /* Release the shared cache map */ + ASSERT(SharedCacheMap->OpenCount > 1); + SharedCacheMap->OpenCount--; + + return FALSE; + } + + return TRUE; +} + +static +BOOLEAN +CcpAcquireCacheMapForFlush( + _In_ PROS_SHARED_CACHE_MAP SharedCacheMap, + _In_ BOOLEAN Wait) +{ + KIRQL OldIrql; + BOOLEAN Ret; + + OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + + Ret = CcpAcquireCacheMapForFlush2(SharedCacheMap, Wait, &OldIrql); + + KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); + + return Ret; +} + +static +VOID +CcpReleaseCacheMapFromFlush2( + _In_ PROS_SHARED_CACHE_MAP SharedCacheMap, + _Inout_ PKIRQL OldIrql) +{ + BOOLEAN Deleted = FALSE; + + /* Release the shared cache map */ + ASSERT(SharedCacheMap->OpenCount > 0); + if (--SharedCacheMap->OpenCount == 0) + { + /* Do cleanup */ + CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, OldIrql); + Deleted = TRUE; + } + + if (!Deleted) + KeSetEvent(&SharedCacheMap->FlushEvent, IO_NO_INCREMENT, FALSE); +} + +static +VOID +CcpReleaseCacheMapFromFlush( + _In_ PROS_SHARED_CACHE_MAP SharedCacheMap) +{ + KIRQL OldIrql; + + OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + + 
CcpReleaseCacheMapFromFlush2(SharedCacheMap, &OldIrql); + + KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); +} + NTSTATUS CcRosFlushDirtyPages ( ULONG Target, @@ -334,6 +424,7 @@ CcRosFlushDirtyPages ( PROS_SHARED_CACHE_MAP SharedCacheMap; PROS_VACB current; BOOLEAN Locked; + ULONG Refs; if (current_entry == &DirtyVacbListHead) { @@ -348,37 +439,31 @@ CcRosFlushDirtyPages ( DirtyVacbListEntry); current_entry = current_entry->Flink; - CcRosVacbIncRefCount(current); - SharedCacheMap = current->SharedCacheMap; /* When performing lazy write, don't handle temporary files */ if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE)) - { - CcRosVacbDecRefCount(current); continue; - } /* Don't attempt to lazy write the files that asked not to */ if (CalledFromLazy && BooleanFlagOn(SharedCacheMap->Flags, WRITEBEHIND_DISABLED)) - { - CcRosVacbDecRefCount(current); continue; - } - ASSERT(current->Dirty); + /* Keep a reference on the VACB */ + CcRosVacbIncRefCount(current); - /* Do not lazy-write the same file concurrently. Fastfat ASSERTS on that */ - if (SharedCacheMap->Flags & SHARED_CACHE_MAP_IN_LAZYWRITE) + /* Don't flush the same file concurrently. + * NOTE: We have to keep a reference on the VACB before calling CcpAcquireCacheMapForFlush2 + * because the function temporarily releases the master lock internally. 
+ */ + if (!CcpAcquireCacheMapForFlush2(SharedCacheMap, FALSE, &OldIrql)) { - CcRosVacbDecRefCount(current); + Refs = CcRosVacbDecRefCount(current); + ASSERT(Refs > 0); continue; } - SharedCacheMap->Flags |= SHARED_CACHE_MAP_IN_LAZYWRITE; - - /* Keep a ref on the shared cache map */ - SharedCacheMap->OpenCount++; + ASSERT(current->Dirty); KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); @@ -387,12 +472,13 @@ CcRosFlushDirtyPages ( { DPRINT("Not locked!"); ASSERT(!Wait); - CcRosVacbDecRefCount(current); + OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); - SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE; - if (--SharedCacheMap->OpenCount == 0) - CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql); + /* Release the VACB and the shared cache map */ + Refs = CcRosVacbDecRefCount(current); + ASSERT(Refs > 0); + CcpReleaseCacheMapFromFlush2(SharedCacheMap, &OldIrql); continue; } @@ -402,17 +488,12 @@ CcRosFlushDirtyPages ( SharedCacheMap->Callbacks->ReleaseFromLazyWrite(SharedCacheMap->LazyWriteContext); - /* We release the VACB before acquiring the lock again, because - * CcRosVacbDecRefCount might free the VACB, as CcRosFlushVacb dropped a - * Refcount. Freeing must be done outside of the lock. - * The refcount is decremented atomically. So this is OK. */ - CcRosVacbDecRefCount(current); OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); - SharedCacheMap->Flags &= ~SHARED_CACHE_MAP_IN_LAZYWRITE; - - if (--SharedCacheMap->OpenCount == 0) - CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql); + /* Release the VACB and the shared cache map */ + Refs = CcRosVacbDecRefCount(current); + ASSERT(Refs > 0); + CcpReleaseCacheMapFromFlush2(SharedCacheMap, &OldIrql); if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) && (Status != STATUS_MEDIA_WRITE_PROTECTED)) @@ -464,12 +545,13 @@ CcRosTrimCache( * actually freed is returned. 
*/ { - PLIST_ENTRY current_entry; - PROS_VACB current; + PLIST_ENTRY CurrentEntry; + PROS_VACB Vacb; ULONG PagesFreed; - KIRQL oldIrql; + KIRQL OldIrql; LIST_ENTRY FreeList; BOOLEAN FlushedPages = FALSE; + ULONG Refs; DPRINT("CcRosTrimCache(Target %lu)\n", Target); @@ -477,25 +559,23 @@ CcRosTrimCache( *NrFreed = 0; -retry: - oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); +Retry: + OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); - current_entry = VacbLruListHead.Flink; - while (current_entry != &VacbLruListHead) + CurrentEntry = VacbLruListHead.Flink; + while (CurrentEntry != &VacbLruListHead) { - ULONG Refs; + Vacb = CONTAINING_RECORD(CurrentEntry, + ROS_VACB, + VacbLruListEntry); - current = CONTAINING_RECORD(current_entry, - ROS_VACB, - VacbLruListEntry); + KeAcquireSpinLockAtDpcLevel(&Vacb->SharedCacheMap->CacheMapLock); - KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock); - - /* Reference the VACB */ - CcRosVacbIncRefCount(current); + /* Only keep iterating through the loop while the lock is held */ + CurrentEntry = CurrentEntry->Flink; /* Check if it's mapped and not dirty */ - if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty) + if (InterlockedCompareExchange((PLONG)&Vacb->MappedCount, 0, 0) > 0 && !Vacb->Dirty) { /* This code is never executed. It is left for reference only. 
*/ #if 1 @@ -505,41 +585,43 @@ retry: ULONG i; PFN_NUMBER Page; + /* Keep a reference on the VACB */ + CcRosVacbIncRefCount(Vacb); + /* We have to break these locks to call MmPageOutPhysicalAddress */ - KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock); - KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql); + KeReleaseSpinLockFromDpcLevel(&Vacb->SharedCacheMap->CacheMapLock); + KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); /* Page out the VACB */ for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++) { - Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT); + Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)Vacb->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT); MmPageOutPhysicalAddress(Page); } /* Reacquire the locks */ - oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); - KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock); + OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + KeAcquireSpinLockAtDpcLevel(&Vacb->SharedCacheMap->CacheMapLock); + + /* Dereference the VACB */ + CcRosVacbDecRefCount(Vacb); #endif } - /* Only keep iterating though the loop while the lock is held */ - current_entry = current_entry->Flink; - - /* Dereference the VACB */ - Refs = CcRosVacbDecRefCount(current); - - /* Check if we can free this entry now */ + /* Check if we can free this VACB now */ + Refs = CcRosVacbGetRefCount(Vacb); if (Refs < 2) { - ASSERT(!current->Dirty); - ASSERT(!current->MappedCount); + ASSERT(!Vacb->Dirty); + ASSERT(!Vacb->MappedCount); ASSERT(Refs == 1); - RemoveEntryList(&current->CacheMapVacbListEntry); - RemoveEntryList(&current->VacbLruListEntry); - InitializeListHead(&current->VacbLruListEntry); - InsertHeadList(&FreeList, &current->CacheMapVacbListEntry); + /* Unlink the VACB and mark for free */ + RemoveEntryList(&Vacb->VacbLruListEntry); + InitializeListHead(&Vacb->VacbLruListEntry); + RemoveEntryList(&Vacb->CacheMapVacbListEntry); + InsertHeadList(&FreeList, 
&Vacb->CacheMapVacbListEntry); /* Calculate how many pages we freed for Mm */ PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target); @@ -547,10 +629,10 @@ retry: (*NrFreed) += PagesFreed; } - KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock); + KeReleaseSpinLockFromDpcLevel(&Vacb->SharedCacheMap->CacheMapLock); } - KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql); + KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); /* Try flushing pages if we haven't met our target */ if ((Target > 0) && !FlushedPages) @@ -567,20 +649,20 @@ retry: { /* Try again after flushing dirty pages */ DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed); - goto retry; + goto Retry; } } while (!IsListEmpty(&FreeList)) { - ULONG Refs; + CurrentEntry = RemoveHeadList(&FreeList); + Vacb = CONTAINING_RECORD(CurrentEntry, + ROS_VACB, + CacheMapVacbListEntry); - current_entry = RemoveHeadList(&FreeList); - current = CONTAINING_RECORD(current_entry, - ROS_VACB, - CacheMapVacbListEntry); - InitializeListHead(&current->CacheMapVacbListEntry); - Refs = CcRosVacbDecRefCount(current); + InitializeListHead(&Vacb->CacheMapVacbListEntry); + + Refs = CcRosVacbDecRefCount(Vacb); ASSERT(Refs == 0); } @@ -618,48 +700,60 @@ CcRosReleaseVacb ( return STATUS_SUCCESS; } -/* Returns with VACB Lock Held! */ +/* Returns with a VACB reference held! 
*/ PROS_VACB CcRosLookupVacb ( PROS_SHARED_CACHE_MAP SharedCacheMap, LONGLONG FileOffset) { - PLIST_ENTRY current_entry; - PROS_VACB current; - KIRQL oldIrql; + PLIST_ENTRY CurrentEntry; + PROS_VACB Vacb; + KIRQL OldIrql; + PROS_VACB Ret = NULL; ASSERT(SharedCacheMap); DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n", SharedCacheMap, FileOffset); - oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock); - current_entry = SharedCacheMap->CacheMapVacbListHead.Flink; - while (current_entry != &SharedCacheMap->CacheMapVacbListHead) + CurrentEntry = SharedCacheMap->CacheMapVacbListHead.Flink; + while (CurrentEntry != &SharedCacheMap->CacheMapVacbListHead) { - current = CONTAINING_RECORD(current_entry, - ROS_VACB, - CacheMapVacbListEntry); - if (IsPointInRange(current->FileOffset.QuadPart, + Vacb = CONTAINING_RECORD(CurrentEntry, + ROS_VACB, + CacheMapVacbListEntry); + if (IsPointInRange(Vacb->FileOffset.QuadPart, VACB_MAPPING_GRANULARITY, FileOffset)) { - CcRosVacbIncRefCount(current); - KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock); - KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql); - return current; + Ret = Vacb; + break; } - if (current->FileOffset.QuadPart > FileOffset) + + if (Vacb->FileOffset.QuadPart > FileOffset) break; - current_entry = current_entry->Flink; + + CurrentEntry = CurrentEntry->Flink; } KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock); - KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql); - return NULL; + if (Ret) + { + /* Move to the tail of the LRU list */ + RemoveEntryList(&Ret->VacbLruListEntry); + InsertTailList(&VacbLruListHead, &Ret->VacbLruListEntry); + + /* Reference it to allow release */ + CcRosVacbIncRefCount(Ret); + } + + KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); + + return Ret; } VOID @@ -737,57 +831,59 @@ BOOLEAN CcRosFreeOneUnusedVacb( VOID) { - KIRQL 
oldIrql; - PLIST_ENTRY current_entry; - PROS_VACB to_free = NULL; + KIRQL OldIrql; + PLIST_ENTRY CurrentEntry; + PROS_VACB ToFree = NULL; + ULONG Refs; - oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); - /* Browse all the available VACB */ - current_entry = VacbLruListHead.Flink; - while ((current_entry != &VacbLruListHead) && (to_free == NULL)) + /* Browse all the available VACBs */ + CurrentEntry = VacbLruListHead.Flink; + while ((CurrentEntry != &VacbLruListHead) && (ToFree == NULL)) { - ULONG Refs; - PROS_VACB current; + PROS_VACB Vacb; - current = CONTAINING_RECORD(current_entry, - ROS_VACB, - VacbLruListEntry); + Vacb = CONTAINING_RECORD(CurrentEntry, + ROS_VACB, + VacbLruListEntry); - KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock); + KeAcquireSpinLockAtDpcLevel(&Vacb->SharedCacheMap->CacheMapLock); - /* Only deal with unused VACB, we will free them */ - Refs = CcRosVacbGetRefCount(current); + /* Only keep iterating through the loop while the lock is held */ + CurrentEntry = CurrentEntry->Flink; + + /* Only deal with unused VACBs, we will free them */ + Refs = CcRosVacbGetRefCount(Vacb); if (Refs < 2) { - ASSERT(!current->Dirty); - ASSERT(!current->MappedCount); + ASSERT(!Vacb->Dirty); + ASSERT(!Vacb->MappedCount); ASSERT(Refs == 1); - /* Reset it, this is the one we want to free */ - RemoveEntryList(&current->CacheMapVacbListEntry); - InitializeListHead(&current->CacheMapVacbListEntry); - RemoveEntryList(&current->VacbLruListEntry); - InitializeListHead(&current->VacbLruListEntry); + /* Unlink it, this is the one we want to free */ + RemoveEntryList(&Vacb->VacbLruListEntry); + InitializeListHead(&Vacb->VacbLruListEntry); + RemoveEntryList(&Vacb->CacheMapVacbListEntry); + InitializeListHead(&Vacb->CacheMapVacbListEntry); - to_free = current; + ToFree = Vacb; } - KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock); - - current_entry = current_entry->Flink; + 
KeReleaseSpinLockFromDpcLevel(&Vacb->SharedCacheMap->CacheMapLock); } - KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql); + KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); - /* And now, free the VACB that we found, if any. */ - if (to_free == NULL) + /* And now, free the VACB that we found, if any */ + if (ToFree == NULL) { return FALSE; } + Refs = CcRosVacbDecRefCount(ToFree); /* This must be its last ref */ - NT_VERIFY(CcRosVacbDecRefCount(to_free) == 0); + ASSERT(Refs == 0); return TRUE; } @@ -799,36 +895,34 @@ CcRosCreateVacb ( LONGLONG FileOffset, PROS_VACB *Vacb) { - PROS_VACB current; - PROS_VACB previous; - PLIST_ENTRY current_entry; + PROS_VACB CurrentVacb; + PROS_VACB PreviousVacb; + PLIST_ENTRY CurrentEntry; NTSTATUS Status; - KIRQL oldIrql; + KIRQL OldIrql; ULONG Refs; SIZE_T ViewSize = VACB_MAPPING_GRANULARITY; ASSERT(SharedCacheMap); - DPRINT("CcRosCreateVacb()\n"); - - current = ExAllocateFromNPagedLookasideList(&VacbLookasideList); - current->BaseAddress = NULL; - current->Dirty = FALSE; - current->PageOut = FALSE; - current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY); - current->SharedCacheMap = SharedCacheMap; - current->MappedCount = 0; - current->ReferenceCount = 0; - InitializeListHead(&current->CacheMapVacbListEntry); - InitializeListHead(&current->DirtyVacbListEntry); - InitializeListHead(&current->VacbLruListEntry); + CurrentVacb = ExAllocateFromNPagedLookasideList(&VacbLookasideList); + CurrentVacb->BaseAddress = NULL; + CurrentVacb->Dirty = FALSE; + CurrentVacb->PageOut = FALSE; + CurrentVacb->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY); + CurrentVacb->SharedCacheMap = SharedCacheMap; + CurrentVacb->MappedCount = 0; + CurrentVacb->ReferenceCount = 0; + InitializeListHead(&CurrentVacb->CacheMapVacbListEntry); + InitializeListHead(&CurrentVacb->DirtyVacbListEntry); + InitializeListHead(&CurrentVacb->VacbLruListEntry); - CcRosVacbIncRefCount(current); + CcRosVacbIncRefCount(CurrentVacb); while 
(TRUE) { /* Map VACB in system space */ - Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &current->BaseAddress, &ViewSize, &current->FileOffset, 0); + Status = MmMapViewInSystemSpaceEx(SharedCacheMap->Section, &CurrentVacb->BaseAddress, &ViewSize, &CurrentVacb->FileOffset, 0); if (NT_SUCCESS(Status)) { break; @@ -840,7 +934,7 @@ CcRosCreateVacb ( */ if (!CcRosFreeOneUnusedVacb()) { - ExFreeToNPagedLookasideList(&VacbLookasideList, current); + ExFreeToNPagedLookasideList(&VacbLookasideList, CurrentVacb); return Status; } } @@ -849,31 +943,32 @@ CcRosCreateVacb ( if (SharedCacheMap->Trace) { DPRINT1("CacheMap 0x%p: new VACB: 0x%p, file offset %I64d, BaseAddress %p\n", - SharedCacheMap, current, current->FileOffset.QuadPart, current->BaseAddress); + SharedCacheMap, CurrentVacb, CurrentVacb->FileOffset.QuadPart, CurrentVacb->BaseAddress); } #endif - oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); + + *Vacb = CurrentVacb; + + KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock); - *Vacb = current; /* There is window between the call to CcRosLookupVacb * and CcRosCreateVacb. We must check if a VACB for the * file offset exist. If there is a VACB, we release * our newly created VACB and return the existing one. 
*/ - KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock); - current_entry = SharedCacheMap->CacheMapVacbListHead.Flink; - previous = NULL; - while (current_entry != &SharedCacheMap->CacheMapVacbListHead) + CurrentEntry = SharedCacheMap->CacheMapVacbListHead.Flink; + PreviousVacb = NULL; + while (CurrentEntry != &SharedCacheMap->CacheMapVacbListHead) { - current = CONTAINING_RECORD(current_entry, - ROS_VACB, - CacheMapVacbListEntry); - if (IsPointInRange(current->FileOffset.QuadPart, + CurrentVacb = CONTAINING_RECORD(CurrentEntry, + ROS_VACB, + CacheMapVacbListEntry); + if (IsPointInRange(CurrentVacb->FileOffset.QuadPart, VACB_MAPPING_GRANULARITY, FileOffset)) { - CcRosVacbIncRefCount(current); KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock); #if DBG if (SharedCacheMap->Trace) @@ -881,44 +976,59 @@ CcRosCreateVacb ( DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n", SharedCacheMap, (*Vacb), - current); + CurrentVacb); } #endif - KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql); + /* Move to the tail of the LRU list */ + RemoveEntryList(&CurrentVacb->VacbLruListEntry); + InsertTailList(&VacbLruListHead, &CurrentVacb->VacbLruListEntry); + + /* Reference it to allow release */ + CcRosVacbIncRefCount(CurrentVacb); + KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); + + /* Free the newly created VACB */ Refs = CcRosVacbDecRefCount(*Vacb); ASSERT(Refs == 0); - *Vacb = current; + /* Return the existing VACB to the caller */ + *Vacb = CurrentVacb; return STATUS_SUCCESS; } - if (current->FileOffset.QuadPart < FileOffset) + + if (CurrentVacb->FileOffset.QuadPart < FileOffset) { - ASSERT(previous == NULL || - previous->FileOffset.QuadPart < current->FileOffset.QuadPart); - previous = current; + ASSERT(PreviousVacb == NULL || + PreviousVacb->FileOffset.QuadPart < CurrentVacb->FileOffset.QuadPart); + PreviousVacb = CurrentVacb; } - if (current->FileOffset.QuadPart > FileOffset) + + if 
(CurrentVacb->FileOffset.QuadPart > FileOffset) break; - current_entry = current_entry->Flink; + + CurrentEntry = CurrentEntry->Flink; } - /* There was no existing VACB. */ - current = *Vacb; - if (previous) + + /* There was no existing VACB */ + CurrentVacb = *Vacb; + if (PreviousVacb) { - InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry); + InsertHeadList(&PreviousVacb->CacheMapVacbListEntry, &CurrentVacb->CacheMapVacbListEntry); } else { - InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry); + InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &CurrentVacb->CacheMapVacbListEntry); } + KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock); - InsertTailList(&VacbLruListHead, &current->VacbLruListEntry); + + InsertTailList(&VacbLruListHead, &CurrentVacb->VacbLruListEntry); /* Reference it to allow release */ - CcRosVacbIncRefCount(current); + CcRosVacbIncRefCount(CurrentVacb); - KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql); + KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); return Status; } @@ -976,48 +1086,29 @@ CcRosGetVacb ( LONGLONG FileOffset, PROS_VACB *Vacb) { - PROS_VACB current; + PROS_VACB CurrentVacb; NTSTATUS Status; ULONG Refs; - KIRQL OldIrql; ASSERT(SharedCacheMap); - DPRINT("CcRosGetVacb()\n"); - - /* - * Look for a VACB already mapping the same data. - */ - current = CcRosLookupVacb(SharedCacheMap, FileOffset); - if (current == NULL) + /* Look for a VACB already mapping the same data */ + CurrentVacb = CcRosLookupVacb(SharedCacheMap, FileOffset); + if (CurrentVacb == NULL) { - /* - * Otherwise create a new VACB. 
- */ - Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current); + /* Otherwise create a new VACB */ + Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &CurrentVacb); if (!NT_SUCCESS(Status)) { return Status; } } - Refs = CcRosVacbGetRefCount(current); - - OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock); - - /* Move to the tail of the LRU list */ - RemoveEntryList(&current->VacbLruListEntry); - InsertTailList(&VacbLruListHead, &current->VacbLruListEntry); - - KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql); - - /* - * Return the VACB to the caller. - */ - *Vacb = current; - + Refs = CcRosVacbGetRefCount(CurrentVacb); ASSERT(Refs > 1); + /* Return the VACB to the caller */ + *Vacb = CurrentVacb; return STATUS_SUCCESS; } @@ -1142,7 +1233,7 @@ CcFlushCache ( IoStatus->Information = 0; } - KeAcquireGuardedMutex(&SharedCacheMap->FlushCacheLock); + CcpAcquireCacheMapForFlush(SharedCacheMap, TRUE); /* * We flush the VACBs that we find here. @@ -1213,7 +1304,7 @@ CcFlushCache ( FlushStart -= FlushStart % VACB_MAPPING_GRANULARITY; } - KeReleaseGuardedMutex(&SharedCacheMap->FlushCacheLock); + CcpReleaseCacheMapFromFlush(SharedCacheMap); quit: if (IoStatus) @@ -1323,7 +1414,7 @@ CcRosInitializeFileCache ( KeInitializeSpinLock(&SharedCacheMap->CacheMapLock); InitializeListHead(&SharedCacheMap->CacheMapVacbListHead); InitializeListHead(&SharedCacheMap->BcbList); - KeInitializeGuardedMutex(&SharedCacheMap->FlushCacheLock); + KeInitializeEvent(&SharedCacheMap->FlushEvent, SynchronizationEvent, TRUE); SharedCacheMap->Flags = SHARED_CACHE_MAP_IN_CREATION; diff --git a/ntoskrnl/include/internal/cc.h b/ntoskrnl/include/internal/cc.h index b37c5478f05..430959f70ec 100644 --- a/ntoskrnl/include/internal/cc.h +++ b/ntoskrnl/include/internal/cc.h @@ -193,7 +193,7 @@ typedef struct _ROS_SHARED_CACHE_MAP LIST_ENTRY CacheMapVacbListHead; BOOLEAN PinAccess; KSPIN_LOCK CacheMapLock; - KGUARDED_MUTEX FlushCacheLock; + KEVENT FlushEvent; #if DBG BOOLEAN Trace; /* enable extra trace output 
for this cache map and it's VACBs */ #endif @@ -202,7 +202,6 @@ typedef struct _ROS_SHARED_CACHE_MAP #define READAHEAD_DISABLED 0x1 #define WRITEBEHIND_DISABLED 0x2 #define SHARED_CACHE_MAP_IN_CREATION 0x4 -#define SHARED_CACHE_MAP_IN_LAZYWRITE 0x8 typedef struct _ROS_VACB {