Index: mdlsup.c
===================================================================
--- ntoskrnl/mm/ARM3/mdlsup.c	(revision 69769)
+++ ntoskrnl/mm/ARM3/mdlsup.c	(working copy)
@@ -182,7 +182,7 @@
     else
     {
         //
-        // Conver to internal caching attribute
+        // Convert to internal caching attribute
         //
         CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
     }
@@ -1282,11 +1282,169 @@
 NTAPI
 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                     IN ULONG PoolTag,
-                                    IN PMDL MemoryDescriptorList,
+                                    IN PMDL Mdl,
                                     IN MEMORY_CACHING_TYPE CacheType)
 {
-    UNIMPLEMENTED;
-    return 0;
+    PPFN_NUMBER MdlPages, LastPage;
+    PFN_COUNT PageCount;
+    BOOLEAN IsIoMapping;
+    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+
+    //
+    // Sanity check
+    //
+    ASSERT(Mdl->ByteCount != 0);
+
+    //
+    // Get the list of pages and count
+    //
+    MdlPages = MmGetMdlPfnArray(Mdl);
+    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
+                                               Mdl->ByteCount);
+    LastPage = MdlPages + PageCount;
+
+    //
+    // Sanity checks
+    //
+    ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
+                             MDL_SOURCE_IS_NONPAGED_POOL |
+                             MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
+    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);
+
+    //
+    // Get the correct cache type
+    //
+    IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
+    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];
+
+    //
+    // Get the first PTE we reserved
+    //
+    PointerPte = MiAddressToPte(MappingAddress) - 2;
+    ASSERT(!PointerPte[0].u.Hard.Valid &&
+           !PointerPte[1].u.Hard.Valid);
+
+    //
+    // Verify that the pool tag matches
+    //
+    TempPte.u.Long = PoolTag;
+    TempPte.u.Hard.Valid = 0;
+    if (PointerPte[1].u.Long != TempPte.u.Long)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x104, /* Trying to map an address it does not own */
+                     (ULONG_PTR)MappingAddress,
+                     PoolTag,
+                     PointerPte[1].u.Long);
+    }
+
+    //
+    // We must have a size, and our helper PTEs must be invalid
+    //
+    if (PointerPte[0].u.Long < (3 << 1))
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x105, /* Trying to map an invalid address */
+                     (ULONG_PTR)MappingAddress,
+                     PoolTag,
+                     (ULONG_PTR)_ReturnAddress());
+    }
+
+    //
+    // If the mapping isn't big enough, fail
+    //
+    if ((PointerPte[0].u.Long >> 1) - 2 < PageCount)
+    {
+        ASSERT((PointerPte[0].u.Long >> 1) - 2 >= PageCount);
+        return NULL;
+    }
+
+    //
+    // Skip our two helper PTEs
+    //
+    PointerPte += 2;
+
+    //
+    // Get the template
+    //
+    TempPte = ValidKernelPte;
+    switch (CacheAttribute)
+    {
+        case MiNonCached:
+
+            //
+            // Disable caching
+            //
+            MI_PAGE_DISABLE_CACHE(&TempPte);
+            MI_PAGE_WRITE_THROUGH(&TempPte);
+            break;
+
+        case MiWriteCombined:
+
+            //
+            // Enable write combining
+            //
+            MI_PAGE_DISABLE_CACHE(&TempPte);
+            MI_PAGE_WRITE_COMBINED(&TempPte);
+            break;
+
+        default:
+            //
+            // Nothing to do
+            //
+            break;
+    }
+
+    //
+    // Loop all PTEs
+    //
+    do
+    {
+        //
+        // We're done here
+        //
+        if (*MdlPages == LIST_HEAD) break;
+
+        //
+        // Write the PTE
+        //
+        TempPte.u.Hard.PageFrameNumber = *MdlPages;
+        if (PointerPte->u.Long != 0)
+        {
+            KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                         0x107, /* Mapping not properly reserved */
+                         (ULONG_PTR)MappingAddress,
+                         (ULONG_PTR)PointerPte,
+                         (ULONG_PTR)LastPage);
+        }
+        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
+    } while (++MdlPages < LastPage);
+
+    //
+    // Mark it as mapped
+    //
+    ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
+    Mdl->MappedSystemVa = MappingAddress;
+    Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
+
+    //
+    // Check if it was partial
+    //
+    if (Mdl->MdlFlags & MDL_PARTIAL)
+    {
+        //
+        // Write the appropriate flag here too
+        //
+        Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
+    }
+
+    //
+    // Return the mapped address
+    //
+    return (PVOID)((ULONG_PTR)MappingAddress + Mdl->ByteOffset);
 }
 
 /*
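[Reviewer note, not part of the patch] The map, unmap and free routines in this patch all decode the same two bookkeeping PTEs that MmAllocateMappingAddress (pool.c, below) writes just in front of the address it returns. A worked sketch of the encoding, using a hypothetical 16-page reservation:

    /* Sketch only: helper-PTE bookkeeping for a 16-page request,
       for which MmAllocateMappingAddress reserves 16 + 2 = 18 PTEs */
    TempPte.u.Long = 18 << 1;     /* size in PTEs; the shift keeps the low
                                     (Valid) bit clear, so the PTE stays
                                     invalid */
    MI_WRITE_INVALID_PTE(&PointerPte[0], TempPte);
    TempPte.u.Long = PoolTag;     /* the caller's tag... */
    TempPte.u.Hard.Valid = 0;     /* ...also with the Valid bit forced off */
    MI_WRITE_INVALID_PTE(&PointerPte[1], TempPte);

    /* Consumers decode the capacity back out; (3 << 1) is the smallest
       legal size word: the two helpers plus at least one mapping PTE */
    SizeInPages = PointerPte[0].u.Long >> 1;    /* 18 */
    PageCapacity = SizeInPages - 2;             /* 16 usable mapping PTEs */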
@@ -1296,9 +1454,118 @@
 NTAPI
 MmUnmapReservedMapping(IN PVOID BaseAddress,
                        IN ULONG PoolTag,
-                       IN PMDL MemoryDescriptorList)
+                       IN PMDL Mdl)
 {
-    UNIMPLEMENTED;
+    PVOID Base;
+    PFN_COUNT PageCount, ExtraPageCount;
+    PPFN_NUMBER MdlPages;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+
+    //
+    // Sanity check
+    //
+    ASSERT(Mdl->ByteCount != 0);
+    ASSERT(BaseAddress > MM_HIGHEST_USER_ADDRESS);
+
+    //
+    // Get base and count information
+    //
+    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
+    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
+
+    //
+    // Sanity checks
+    //
+    ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
+    ASSERT(PageCount != 0);
+    ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
+
+    //
+    // Get the first PTE we reserved
+    //
+    PointerPte = MiAddressToPte(BaseAddress) - 2;
+    ASSERT(!PointerPte[0].u.Hard.Valid &&
+           !PointerPte[1].u.Hard.Valid);
+
+    //
+    // Verify that the pool tag matches
+    //
+    TempPte.u.Long = PoolTag;
+    TempPte.u.Hard.Valid = 0;
+    if (PointerPte[1].u.Long != TempPte.u.Long)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x108, /* Trying to unmap an address it does not own */
+                     (ULONG_PTR)BaseAddress,
+                     PoolTag,
+                     PointerPte[1].u.Long);
+    }
+
+    //
+    // We must have a size
+    //
+    if (PointerPte[0].u.Long < (3 << 1))
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x109, /* Mapping apparently empty */
+                     (ULONG_PTR)BaseAddress,
+                     PoolTag,
+                     (ULONG_PTR)_ReturnAddress());
+    }
+
+    //
+    // Skip our two helper PTEs
+    //
+    PointerPte += 2;
+
+    //
+    // This should be a resident system PTE
+    //
+    ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
+    ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
+    ASSERT(PointerPte->u.Hard.Valid == 1);
+
+    // TODO: check that the MDL range makes sense with regard to the mapping range
+    // TODO: zero the PTEs
+    // TODO: check if any of them are already zero
+    // TODO: check if any outside the MDL range are nonzero
+    // TODO: find out what to do with extra PTEs
+
+    //
+    // Check if the caller wants us to free extra pages
+    //
+    if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
+    {
+        //
+        // Get the MDL page array
+        //
+        MdlPages = MmGetMdlPfnArray(Mdl);
+
+        /* The number of extra pages is stored after the PFN array */
+        ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);
+
+        //
+        // Do the math
+        //
+        PageCount += ExtraPageCount;
+        PointerPte -= ExtraPageCount;
+        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
+        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
+
+        //
+        // Get the new base address
+        //
+        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
+                              (ExtraPageCount << PAGE_SHIFT));
+    }
+
+    //
+    // Remove flags
+    //
+    Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
+                       MDL_PARTIAL_HAS_BEEN_MAPPED |
+                       MDL_FREE_EXTRA_PTES);
 }
 
 /*
@@ -1335,10 +1602,27 @@
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
 {
-    UNIMPLEMENTED;
+    KAPC_STATE ApcState;
+    BOOLEAN Attached = FALSE;
+
+    /* Latch this before attaching: after KeStackAttachProcess,
+       PsGetCurrentProcess() already returns the target process */
+    if (Process != PsGetCurrentProcess())
+    {
+        KeStackAttachProcess(&Process->Pcb, &ApcState);
+        Attached = TRUE;
+    }
+
+    _SEH2_TRY
+    {
+        MmProbeAndLockPages(MemoryDescriptorList, AccessMode, Operation);
+    }
+    _SEH2_FINALLY
+    {
+        if (Attached)
+        {
+            KeUnstackDetachProcess(&ApcState);
+        }
+    }
+    _SEH2_END;
 }
-
 /*
  * @unimplemented
  */
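[Reviewer note, not part of the patch] In the MmProbeAndLockProcessPages hunk above, the detach decision is latched into a local before attaching, because KeStackAttachProcess switches the thread's address space and PsGetCurrentProcess() then reports the target process; re-testing the comparison in the finally block would therefore never detach. A minimal sketch of the idiom, where RunInTargetProcess and DoWork are hypothetical names:

    /* Sketch only: the stack-attach/detach idiom used above */
    extern VOID DoWork(VOID);          /* hypothetical worker */

    VOID
    RunInTargetProcess(IN PEPROCESS Process)
    {
        KAPC_STATE ApcState;
        BOOLEAN Attached = FALSE;

        /* Decide before attaching; once attached, PsGetCurrentProcess()
           compares equal to Process and the test would always fail */
        if (Process != PsGetCurrentProcess())
        {
            KeStackAttachProcess(&Process->Pcb, &ApcState);
            Attached = TRUE;
        }

        DoWork();

        /* Detach exactly when we attached */
        if (Attached) KeUnstackDetachProcess(&ApcState);
    }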
Index: pool.c
===================================================================
--- ntoskrnl/mm/ARM3/pool.c	(revision 69769)
+++ ntoskrnl/mm/ARM3/pool.c	(working copy)
@@ -1387,7 +1387,37 @@
 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                          IN ULONG PoolTag)
 {
-    UNIMPLEMENTED;
+    PFN_NUMBER SizeInPages;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+
+    /* How many PTEs does the caller want? */
+    SizeInPages = BYTES_TO_PAGES(NumberOfBytes);
+    if (SizeInPages == 0)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x100, /* Requested 0 mappings */
+                     SizeInPages,
+                     PoolTag,
+                     (ULONG_PTR)_ReturnAddress());
+    }
+
+    /* We need two extra PTEs to store the size and the pool tag */
+    SizeInPages += 2;
+
+    /* Reserve our PTEs */
+    PointerPte = MiReserveSystemPtes(SizeInPages, SystemPteSpace);
+    if (PointerPte)
+    {
+        TempPte.u.Long = SizeInPages << 1;
+        MI_WRITE_INVALID_PTE(&PointerPte[0], TempPte);
+        TempPte.u.Long = PoolTag;
+        TempPte.u.Hard.Valid = 0;
+        MI_WRITE_INVALID_PTE(&PointerPte[1], TempPte);
+        return MiPteToAddress(PointerPte + 2);
+    }
+
+    /* Failed to reserve PTEs */
     return NULL;
 }
 
@@ -1399,7 +1429,52 @@
 MmFreeMappingAddress(IN PVOID BaseAddress,
                      IN ULONG PoolTag)
 {
-    UNIMPLEMENTED;
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+    PFN_NUMBER SizeInPages;
+    PFN_NUMBER i;
+
+    /* Get the first PTE we reserved */
+    PointerPte = MiAddressToPte(BaseAddress) - 2;
+
+    /* Verify that the pool tag matches */
+    TempPte.u.Long = PoolTag;
+    TempPte.u.Hard.Valid = 0;
+    if (PointerPte[1].u.Long != TempPte.u.Long)
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x101, /* Trying to free an address it does not own */
+                     (ULONG_PTR)BaseAddress,
+                     PoolTag,
+                     PointerPte[1].u.Long);
+    }
+
+    /* We must have a size */
+    SizeInPages = PointerPte[0].u.Long >> 1;
+    if (PointerPte[0].u.Long < (3 << 1))
+    {
+        KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                     0x102, /* Mapping apparently empty */
+                     (ULONG_PTR)BaseAddress,
+                     PoolTag,
+                     (ULONG_PTR)_ReturnAddress());
+    }
+
+    /* The mapping PTEs must no longer be in use; verify they are all clear */
+    for (i = 2; i < SizeInPages; i++)
+    {
+        if (PointerPte[i].u.Long != 0)
+        {
+            KeBugCheckEx(SYSTEM_PTE_MISUSE,
+                         0x103, /* Mapping address still reserved */
+                         (ULONG_PTR)PointerPte,
+                         PoolTag,
+                         SizeInPages - 2);
+        }
+    }
+
+    /* Release the PTEs */
+    MiReleaseSystemPtes(PointerPte, SizeInPages, SystemPteSpace);
 }
 
 /* EOF */
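[Reviewer note, not part of the patch] For test context, this is the call sequence the four routines are documented to serve; MAPPING_TAG, Mdl and Length are illustrative names, and the MDL must already describe locked-down pages:

    /* Sketch only: hypothetical driver-side use of the new routines */
    #define MAPPING_TAG 'vRsR'         /* hypothetical pool tag */

    NTSTATUS
    MapLockedMdlAtReservedVa(IN PMDL Mdl, IN SIZE_T Length)
    {
        PVOID ReservedVa, MappedVa;

        /* Reserve a system VA range up front, typically at init time */
        ReservedVa = MmAllocateMappingAddress(Length, MAPPING_TAG);
        if (!ReservedVa) return STATUS_INSUFFICIENT_RESOURCES;

        /* Map a locked MDL into the reservation; this returns NULL if
           the MDL spans more pages than the reservation can hold */
        MappedVa = MmMapLockedPagesWithReservedMapping(ReservedVa,
                                                       MAPPING_TAG,
                                                       Mdl,
                                                       MmCached);
        if (MappedVa)
        {
            /* ... access the buffer through MappedVa ... */
            MmUnmapReservedMapping(ReservedVa, MAPPING_TAG, Mdl);
        }

        /* Return the VA range once no further mappings are needed */
        MmFreeMappingAddress(ReservedVa, MAPPING_TAG);
        return MappedVa ? STATUS_SUCCESS : STATUS_INSUFFICIENT_RESOURCES;
    }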