diff --git a/ntoskrnl/mm/ARM3/mdlsup.c b/ntoskrnl/mm/ARM3/mdlsup.c
index 76b06f0c72e..cf9ae9359b7 100644
--- a/ntoskrnl/mm/ARM3/mdlsup.c
+++ b/ntoskrnl/mm/ARM3/mdlsup.c
@@ -529,7 +529,7 @@ MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
     else
     {
         //
-        // Conver to internal caching attribute
+        // Convert to internal caching attribute
         //
         CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
     }
@@ -1622,29 +1622,297 @@ MmAdvanceMdl(IN PMDL Mdl,
 }
 
 /*
- * @unimplemented
+ * @implemented
  */
 PVOID
 NTAPI
 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                     IN ULONG PoolTag,
-                                    IN PMDL MemoryDescriptorList,
+                                    IN PMDL Mdl,
                                     IN MEMORY_CACHING_TYPE CacheType)
 {
-    UNIMPLEMENTED;
-    return 0;
+    PPFN_NUMBER MdlPages, LastPage;
+    PFN_COUNT PageCount;
+    BOOLEAN IsIoMapping;
+    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+
+    //
+    // Sanity check
+    //
+    ASSERT(Mdl->ByteCount != 0);
+
+    //
+    // Get the list of pages and count
+    //
+    MdlPages = MmGetMdlPfnArray(Mdl);
+    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
+                                               Mdl->ByteCount);
+    LastPage = MdlPages + PageCount;
+
+    //
+    // Sanity checks
+    //
+    ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
+                             MDL_SOURCE_IS_NONPAGED_POOL |
+                             MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
+    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);
+
+    //
+    // Get the correct cache type
+    //
+    IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
+    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];
+
+    //
+    // Get the first PTE we reserved
+    //
+    ASSERT(MappingAddress);
+    PointerPte = MiAddressToPte(MappingAddress) - 2;
+    ASSERT(!PointerPte[0].u.Hard.Valid &&
+           !PointerPte[1].u.Hard.Valid);
+
+    //
+    // Verify that the pool tag matches
+    //
+    TempPte.u.Long = PoolTag;
+    TempPte.u.Hard.Valid = 0;
+    if (PointerPte[1].u.Long != TempPte.u.Long)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x104, /* Trying to map an address it does not own */
+                     (ULONG_PTR)MappingAddress,
+                     PoolTag,
+                     PointerPte[1].u.Long);
+    }
+
+    //
+    // We must have a size, and our helper PTEs must be invalid
+    //
+    if (PointerPte[0].u.Long < (3 << 1))
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x105, /* Trying to map an invalid address */
+                     (ULONG_PTR)MappingAddress,
+                     PoolTag,
+                     (ULONG_PTR)_ReturnAddress());
+    }
+
+    //
+    // If the mapping isn't big enough, fail
+    //
+    if ((PointerPte[0].u.Long >> 1) - 2 < PageCount)
+    {
+        return NULL;
+    }
+
+    //
+    // Skip our two helper PTEs
+    //
+    PointerPte += 2;
+
+    //
+    // Get the template
+    //
+    TempPte = ValidKernelPte;
+    switch (CacheAttribute)
+    {
+        case MiNonCached:
+
+            //
+            // Disable caching
+            //
+            MI_PAGE_DISABLE_CACHE(&TempPte);
+            MI_PAGE_WRITE_THROUGH(&TempPte);
+            break;
+
+        case MiWriteCombined:
+
+            //
+            // Enable write combining
+            //
+            MI_PAGE_DISABLE_CACHE(&TempPte);
+            MI_PAGE_WRITE_COMBINED(&TempPte);
+            break;
+
+        default:
+            //
+            // Nothing to do
+            //
+            break;
+    }
+
+    //
+    // Loop all PTEs
+    //
+    do
+    {
+        //
+        // We're done here
+        //
+        if (*MdlPages == LIST_HEAD) break;
+
+        //
+        // Write the PTE
+        //
+        TempPte.u.Hard.PageFrameNumber = *MdlPages;
+        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
+    } while (++MdlPages < LastPage);
+
+    //
+    // Mark it as mapped
+    //
+    ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
+    Mdl->MappedSystemVa = MappingAddress;
+    Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
+
+    //
+    // Check if it was partial
+    //
+    if (Mdl->MdlFlags & MDL_PARTIAL)
+    {
+        //
+        // Write the appropriate flag here too
+        //
+        Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
+    }
+
+    //
+    // Return the mapped address
+    //
+    return (PVOID)((ULONG_PTR)MappingAddress + Mdl->ByteOffset);
 }
 
 /*
- * @unimplemented
+ * @implemented
  */
 VOID
 NTAPI
 MmUnmapReservedMapping(IN PVOID BaseAddress,
                        IN ULONG PoolTag,
-                       IN PMDL MemoryDescriptorList)
+                       IN PMDL Mdl)
 {
-    UNIMPLEMENTED;
+    PVOID Base;
+    PFN_COUNT PageCount, ExtraPageCount;
+    PPFN_NUMBER MdlPages;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+
+    //
+    // Sanity check
+    //
+    ASSERT(Mdl->ByteCount != 0);
+    ASSERT(BaseAddress > MM_HIGHEST_USER_ADDRESS);
+
+    //
+    // Get base and count information
+    //
+    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
+    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
+
+    //
+    // Sanity checks
+    //
+    ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
+    ASSERT(PageCount != 0);
+    ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
+
+
+    //
+    // Get the first PTE we reserved
+    //
+    PointerPte = MiAddressToPte(BaseAddress) - 2;
+    ASSERT(!PointerPte[0].u.Hard.Valid &&
+           !PointerPte[1].u.Hard.Valid);
+
+    //
+    // Verify that the pool tag matches
+    //
+    TempPte.u.Long = PoolTag;
+    TempPte.u.Hard.Valid = 0;
+    if (PointerPte[1].u.Long != TempPte.u.Long)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x108, /* Trying to unmap an address it does not own */
+                     (ULONG_PTR)BaseAddress,
+                     PoolTag,
+                     PointerPte[1].u.Long);
+    }
+
+    //
+    // We must have a size
+    //
+    if (PointerPte[0].u.Long < (3 << 1))
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x109, /* Mapping apparently empty */
+                     (ULONG_PTR)BaseAddress,
+                     PoolTag,
+                     (ULONG_PTR)_ReturnAddress());
+    }
+
+    //
+    // Skip our two helper PTEs
+    //
+    PointerPte += 2;
+
+    //
+    // This should be a resident system PTE
+    //
+    ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
+    ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
+    ASSERT(PointerPte->u.Hard.Valid == 1);
+
+    // TODO: check the MDL range makes sense with regard to the mapping range
+    // TODO: check if any of them are already zero
+    // TODO: check if any outside the MDL range are nonzero
+    // TODO: find out what to do with extra PTEs
+
+    //
+    // Check if the caller wants us to free advanced pages
+    //
+    if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
+    {
+        //
+        // Get the MDL page array
+        //
+        MdlPages = MmGetMdlPfnArray(Mdl);
+
+        /* Number of extra pages stored after the PFN array */
+        ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);
+
+        //
+        // Do the math
+        //
+        PageCount += ExtraPageCount;
+        PointerPte -= ExtraPageCount;
+        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
+        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
+
+        //
+        // Get the new base address
+        //
+        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
+                              (ExtraPageCount << PAGE_SHIFT));
+    }
+
+    //
+    // Zero the PTEs
+    //
+    RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));
+
+    //
+    // Flush the TLB
+    //
+    KeFlushEntireTb(TRUE, TRUE);
+
+    //
+    // Remove flags
+    //
+    Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
+                       MDL_PARTIAL_HAS_BEEN_MAPPED |
+                       MDL_FREE_EXTRA_PTES);
 }
 
 /*
diff --git a/ntoskrnl/mm/ARM3/miarm.h b/ntoskrnl/mm/ARM3/miarm.h
index 7515b4c506b..7bca99f70c4 100644
--- a/ntoskrnl/mm/ARM3/miarm.h
+++ b/ntoskrnl/mm/ARM3/miarm.h
@@ -1002,7 +1002,8 @@ MI_WRITE_INVALID_PTE(IN PMMPTE PointerPte,
 {
     /* Write the invalid PTE */
     ASSERT(InvalidPte.u.Hard.Valid == 0);
-    ASSERT(InvalidPte.u.Long != 0);
+    /* Added by Timo Kreuzer in https://github.com/reactos/reactos/commit/4019985 */
+//    ASSERT(InvalidPte.u.Long != 0);
     *PointerPte = InvalidPte;
 }
 
diff --git a/ntoskrnl/mm/ARM3/pool.c b/ntoskrnl/mm/ARM3/pool.c
index badc8a3e4d7..c33fa8b16ad 100644
--- a/ntoskrnl/mm/ARM3/pool.c
+++ b/ntoskrnl/mm/ARM3/pool.c
@@ -1580,6 +1580,10 @@ MmAllocateMappingAddress(
     PMMPTE PointerPte;
     MMPTE TempPte;
 
+    /* Fast exit if PoolTag is NULL */
+    if (!PoolTag)
+        return NULL;
+
     /* How many PTEs does the caller want? */
     SizeInPages = BYTES_TO_PAGES(NumberOfBytes);
     if (SizeInPages == 0)
@@ -1604,8 +1608,7 @@ MmAllocateMappingAddress(
     }
 
     ASSERT(SizeInPages <= MM_EMPTY_PTE_LIST);
-    TempPte.u.Long = 0;
-    TempPte.u.List.NextEntry = SizeInPages;
+    TempPte.u.Long = SizeInPages << 1;
     MI_WRITE_INVALID_PTE(&PointerPte[0], TempPte);
     TempPte.u.Long = PoolTag;
     TempPte.u.Hard.Valid = 0;
@@ -1658,8 +1661,8 @@ MmFreeMappingAddress(
     }
 
     /* We must have a size */
-    SizeInPages = PointerPte[0].u.List.NextEntry;
-    if (SizeInPages < 3)
+    SizeInPages = PointerPte[0].u.Long >> 1;
+    if (PointerPte[0].u.Long < (3 << 1))
     {
         KeBugCheckEx(SYSTEM_PTE_MISUSE,
                      PTE_MAPPING_EMPTY, /* Mapping apparently empty */
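
A note on the bookkeeping scheme these hunks share (a reading aid, not part of the patch): MmAllocateMappingAddress reserves SizeInPages system PTEs and uses the first two as invalid "helper" PTEs sitting just below the address returned to the caller. The patch replaces the u.List.NextEntry encoding of the size with a raw shifted value, so bit 0 of u.Long (the hardware Valid bit on x86) is guaranteed clear and the PTE stays invalid. Assuming that PTE layout, the helpers end up as:

    /* Helper PTE layout in front of the reserved VA range */
    PointerPte[0].u.Long = SizeInPages << 1;  /* total PTE count, Valid bit (bit 0) clear */
    PointerPte[1].u.Long = PoolTag;           /* owner's pool tag...                      */
    PointerPte[1].u.Hard.Valid = 0;           /* ...with the Valid bit forced clear       */

The size is recovered with (u.Long >> 1); anything below 3 (two helpers plus at least one mappable PTE) is rejected, which is exactly what the (3 << 1) comparisons in mdlsup.c and pool.c test. Since two of the reserved PTEs are helpers, MmMapLockedPagesWithReservedMapping returns NULL for any MDL spanning more than (u.Long >> 1) - 2 pages.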
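
For reference, a minimal usage sketch of the now-implemented reserved-mapping path, written against the documented semantics of MmAllocateMappingAddress, MmProbeAndLockPages, MmMapLockedPagesWithReservedMapping, MmUnmapReservedMapping and MmFreeMappingAddress; DemoMapThroughReservedVa, DEMO_TAG and the buffer parameters are hypothetical, and error handling is condensed:

    #include <ntddk.h>

    /* Hypothetical pool tag; must be nonzero (see the new fast exit above) */
    #define DEMO_TAG 'pMeR'

    /* Hypothetical helper: lock a buffer and map it through a pre-reserved VA range */
    NTSTATUS
    DemoMapThroughReservedVa(_In_reads_bytes_(Length) PVOID Buffer,
                             _In_ SIZE_T Length)
    {
        PVOID ReservedVa, MappedVa;
        PMDL Mdl;
        NTSTATUS Status = STATUS_SUCCESS;

        /* Reserve the system VA up front (typically at driver init), so the
         * later map call cannot fail for lack of system PTEs */
        ReservedVa = MmAllocateMappingAddress(Length, DEMO_TAG);
        if (!ReservedVa) return STATUS_INSUFFICIENT_RESOURCES;

        Mdl = IoAllocateMdl(Buffer, (ULONG)Length, FALSE, FALSE, NULL);
        if (!Mdl)
        {
            MmFreeMappingAddress(ReservedVa, DEMO_TAG);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        __try
        {
            MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
        }
        __except (EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(Mdl);
            MmFreeMappingAddress(ReservedVa, DEMO_TAG);
            return GetExceptionCode();
        }

        /* Returns NULL if the MDL needs more pages than were reserved */
        MappedVa = MmMapLockedPagesWithReservedMapping(ReservedVa, DEMO_TAG, Mdl, MmCached);
        if (MappedVa)
        {
            /* ... access the buffer through MappedVa ... */
            MmUnmapReservedMapping(ReservedVa, DEMO_TAG, Mdl);
        }
        else
        {
            Status = STATUS_INSUFFICIENT_RESOURCES;
        }

        MmUnlockPages(Mdl);
        IoFreeMdl(Mdl);
        MmFreeMappingAddress(ReservedVa, DEMO_TAG);
        return Status;
    }

The point of the split API is that the VA reservation happens once, up front, so the map call itself never has to hunt for free system PTEs at high IRQL; it only fails (by returning NULL) when the MDL spans more pages than the caller reserved.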