/*
* COPYRIGHT: See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/freelist.c
* PURPOSE: Handle the list of free physical pages
*
* PROGRAMMERS: David Welch (welch@cwcom.net)
* Robert Bergkvist
*/
/* INCLUDES ****************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#define MODULE_INVOLVED_IN_ARM3
#include "ARM3/miarm.h"
#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);
/* GLOBALS ****************************************************************/
PMMPFN MmPfnDatabase;
PFN_NUMBER MmAvailablePages;
PFN_NUMBER MmResidentAvailablePages;
PFN_NUMBER MmResidentAvailableAtInit;
SIZE_T MmTotalCommittedPages;
SIZE_T MmSharedCommit;
SIZE_T MmDriverCommit;
SIZE_T MmProcessCommit;
SIZE_T MmPagedPoolCommit;
SIZE_T MmPeakCommitment;
SIZE_T MmTotalCommitLimitMaximum;
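/* One bit per physical page; set bits mark pages handed to legacy-Mm user mappings */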
static RTL_BITMAP MiUserPfnBitMap;
/* FUNCTIONS *************************************************************/
VOID
NTAPI
MiInitializeUserPfnBitmap(VOID)
{
PVOID Bitmap;
/* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
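/* One bit per page: round the bit count up to whole 32-bit ULONGs, 4 bytes each */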
Bitmap = ExAllocatePoolWithTag(NonPagedPool,
(((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
TAG_MM);
ASSERT(Bitmap);
/* Initialize it and clear all the bits to begin with */
RtlInitializeBitMap(&MiUserPfnBitMap,
Bitmap,
(ULONG)MmHighestPhysicalPage + 1);
RtlClearAllBits(&MiUserPfnBitMap);
}
PFN_NUMBER
NTAPI
MmGetLRUFirstUserPage(VOID)
{
ULONG Position;
KIRQL OldIrql;
/* Find the first user page */
OldIrql = MiAcquirePfnLock();
Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
MiReleasePfnLock(OldIrql);
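/* RtlFindSetBits returns 0xFFFFFFFF when no bit is set; PFN 0 is never a
user page, so 0 can safely double as the "no user pages" return value */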
if (Position == 0xFFFFFFFF) return 0;
/* Return it */
ASSERT(Position != 0);
ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
return Position;
}
VOID
NTAPI
MmInsertLRULastUserPage(PFN_NUMBER Pfn)
{
KIRQL OldIrql;
/* Set the page as a user page */
ASSERT(Pfn != 0);
ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
OldIrql = MiAcquirePfnLock();
RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
MiReleasePfnLock(OldIrql);
}
PFN_NUMBER
NTAPI
MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
{
ULONG Position;
KIRQL OldIrql;
/* Find the next user page */
OldIrql = MiAcquirePfnLock();
Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
MiReleasePfnLock(OldIrql);
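/* As above, 0xFFFFFFFF means no further user pages exist */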
if (Position == 0xFFFFFFFF) return 0;
/* Return it */
ASSERT(Position != 0);
ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
return Position;
}
VOID
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
KIRQL OldIrql;
/* Unset the page as a user page */
ASSERT(Page != 0);
ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
OldIrql = MiAcquirePfnLock();
RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
MiReleasePfnLock(OldIrql);
}
BOOLEAN
NTAPI
MiIsPfnFree(IN PMMPFN Pfn1)
{
/* Must be on the zeroed, free, or standby list, with no references, and linked */
return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
(Pfn1->u1.Flink) &&
(Pfn1->u2.Blink) &&
!(Pfn1->u3.e2.ReferenceCount));
}
BOOLEAN
NTAPI
MiIsPfnInUse(IN PMMPFN Pfn1)
{
/* Not free: on the active/modified lists, unlinked, or with references */
return !MiIsPfnFree(Pfn1);
}
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
IN PHYSICAL_ADDRESS HighAddress,
IN PHYSICAL_ADDRESS SkipBytes,
IN SIZE_T TotalBytes,
IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
IN ULONG MdlFlags)
{
PMDL Mdl;
PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
PPFN_NUMBER MdlPage, LastMdlPage;
KIRQL OldIrql;
PMMPFN Pfn1;
INT LookForZeroedPages;
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);
//
// Convert the low address into a PFN
//
LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);
//
// Convert and normalize the high address into a PFN
//
HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;
//
// Validate SkipBytes and convert it into pages
//
if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);
/* This isn't supported at all */
if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");
//
// Now compute the number of pages the MDL will cover
//
PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
do
{
//
// Try creating an MDL for this many pages
//
Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
if (Mdl) break;
//
// This function is not required to return the number of pages requested.
// In fact, it can return as few as a single page, and callers are expected
// to deal with this scenario. So re-attempt the allocation with fewer
// pages than before, and see if it worked this time.
//
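// Back off by a sixteenth (about 6%) of the current request on each retry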
PageCount -= (PageCount >> 4);
} while (PageCount);
//
// Wow, not even a single page was around!
//
if (!Mdl) return NULL;
//
// This is where the page array starts...
//
MdlPage = (PPFN_NUMBER)(Mdl + 1);
//
// Lock the PFN database
//
OldIrql = MiAcquirePfnLock();
//
// Are we looking for any pages, without discriminating?
//
if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
{
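//
// No range restriction applies here, so each page can simply be taken
// straight off the free lists via MiRemoveAnyPage
//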
//
// Well then, let's go shopping
//
while (PagesFound < PageCount)
{
/* Grab a page */
MI_SET_USAGE(MI_USAGE_MDL);
MI_SET_PROCESS2("Kernel");
/* FIXME: This check should be smarter */
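/* Only call the allocator while pages remain; Page stays 0 on failure */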
Page = 0;
if (MmAvailablePages != 0)
Page = MiRemoveAnyPage(0);
if (Page == 0)
{
/* This is not good... hopefully we have at least SOME pages */
ASSERT(PagesFound);
break;
}
/* Grab the page entry for it */
Pfn1 = MiGetPfnEntry(Page);
//
// Make sure it's really free
//
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
/* Now setup the page and mark it */
Pfn1->u3.e2.ReferenceCount = 1;
Pfn1->u2.ShareCount = 1;
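// No PTE maps these pages yet, so mark the PFN as deleted and tag its
// PteFrame with a recognizable dummy value instead of a real page table
// frame (presumably to make MDL-owned pages easy to spot when debugging)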
MI_SET_PFN_DELETED(Pfn1);
Pfn1->u4.PteFrame = 0x1FFEDCB;
Pfn1->u3.e1.StartOfAllocation = 1;
Pfn1->u3.e1.EndOfAllocation = 1;
Pfn1->u4.VerifierAllocation = 0;
//
// Save it into the MDL
//
*MdlPage++ = MiGetPfnEntryIndex(Pfn1);
PagesFound++;
}
}
else
{
//
// You want a specific range of pages. We'll do this in two runs
//
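// The first run (LookForZeroedPages == 1) only takes pages from the
// zeroed page list, so they won't have to be zeroed again later; the
// second run falls back to any free page in the range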
for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
{
//
// Scan the range you specified
//
for (Page = LowPage; Page < HighPage; Page++)
{
//
// Get the PFN entry for this page
//
Pfn1 = MiGetPfnEntry(Page);
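// MiGetPfnEntry returns NULL for frames the PFN bitmap excludes
// (device, BIOS and other non-RAM pages), so this had better be real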
ASSERT(Pfn1);
//
// Make sure it's free and, if this is our first pass, zeroed
//
if (MiIsPfnInUse(Pfn1)) continue;
if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;
/* Remove the page from the free or zero list */
ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
MI_SET_USAGE(MI_USAGE_MDL);
MI_SET_PROCESS2("Kernel");
MiUnlinkFreeOrZeroedPage(Pfn1);
//
// Sanity checks
//
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
//
// Now setup the page and mark it
//
Pfn1->u3.e2.ReferenceCount = 1;
Pfn1->u2.ShareCount = 1;
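// Same treatment as the unconstrained path above: mark the PFN deleted
// and tag it with the dummy PteFrame value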
MI_SET_PFN_DELETED(Pfn1);
Pfn1->u4.PteFrame = 0x1FFEDCB;
Pfn1->u3.e1.StartOfAllocation = 1;
Pfn1->u3.e1.EndOfAllocation = 1;
Pfn1->u4.VerifierAllocation = 0;
//
// Save this page into the MDL
//
*MdlPage++ = Page;
if (++PagesFound == PageCount) break;
}
//
// If the first pass was enough, don't keep going; otherwise, go again
//
if (PagesFound == PageCount) break;
}
}
//
// Now release the PFN lock
//
MiReleasePfnLock(OldIrql);
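// Every page we claimed is referenced and unlinked from the free lists,
// so nothing else can grab it; the zeroing pass below can safely run
// without the lock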
//
// We might've found fewer pages, but never more ;-)
//
if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
if (!PagesFound)
{
//
// If we didn't find any pages at all, fail
//
DPRINT1("NO MDL PAGES!\n");
ExFreePoolWithTag(Mdl, TAG_MDL);
return NULL;
}
//
// Write out how many pages we found
//
Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);
//
// Terminate the MDL array if some pages are missing
//
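// (LIST_HEAD is the -1 PFN sentinel that marks a sparse/partial MDL;
// the loop below, and any MDL consumer, stops when it hits one)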
if (PagesFound != PageCount) *MdlPage = LIST_HEAD;
//
// Now go back and loop over all the MDL pages
//
MdlPage = (PPFN_NUMBER)(Mdl + 1);
LastMdlPage = MdlPage + PagesFound;
while (MdlPage < LastMdlPage)
{
//
// Check if we've reached the end
//
Page = *MdlPage++;
if (Page == LIST_HEAD) break;
//
// Get the PFN entry for the page and check if we should zero it out
//
Pfn1 = MiGetPfnEntry(Page);
ASSERT(Pfn1);
if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
Pfn1->u3.e1.PageLocation = ActiveAndValid;
}

//
// We're done, mark the pages as locked
//
Mdl->Process = NULL;
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
return Mdl;
}
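
/*
 * Illustrative only (not compiled): a minimal sketch of how a driver-side
 * caller typically consumes this allocation path. MmAllocatePagesForMdl can
 * return a partial or sparse MDL, so the caller checks the MDL's byte count
 * instead of assuming the full request was satisfied. The 16-page request
 * below is an arbitrary example value.
 */
#if 0
static VOID MiExampleMdlUsage(VOID)
{
    PHYSICAL_ADDRESS Low, High, Skip;
    PMDL Mdl;

    /* Ask for 16 pages anywhere in physical memory */
    Low.QuadPart = 0;
    High.QuadPart = -1;
    Skip.QuadPart = 0;
    Mdl = MmAllocatePagesForMdl(Low, High, Skip, 16 * PAGE_SIZE);
    if (!Mdl) return;

    /* The MDL may describe fewer bytes than were requested */
    ASSERT(MmGetMdlByteCount(Mdl) <= 16 * PAGE_SIZE);

    /* ... map with MmMapLockedPagesSpecifyCache, use the pages, unmap ... */

    /* Return the pages first, then free the MDL itself */
    MmFreePagesFromMdl(Mdl);
    ExFreePool(Mdl);
}
#endif
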
VOID
NTAPI
MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
{
KIRQL oldIrql;
PMMPFN Pfn1;
oldIrql = MiAcquirePfnLock();
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
if (ListHead)
{
/* Should not be trying to insert an RMAP for a non-active page */
ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
/* Set the list head address */
Pfn1->RmapListHead = ListHead;
}
else
{
/* ReactOS semantics dictate the page is STILL active right now */
ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
/* In this case, the RMAP is actually being removed, so clear the field */
Pfn1->RmapListHead = NULL;
/* ReactOS semantics will now release the page, which will make it free and enter a colored list */
}
MiReleasePfnLock(oldIrql);
}

PMM_RMAP_ENTRY
NTAPI
MmGetRmapListHeadPage(PFN_NUMBER Pfn)
{
KIRQL oldIrql;
PMM_RMAP_ENTRY ListHead;
PMMPFN Pfn1;
/* Lock PFN database */
oldIrql = MiAcquirePfnLock();
/* Get the entry */
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
/* Get the list head */
ListHead = Pfn1->RmapListHead;
/* Should not have an RMAP for a non-active page */
ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
/* Release PFN database and return rmap list head */
MiReleasePfnLock(oldIrql);
return ListHead;
}
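
/*
 * Illustrative only (not compiled): roughly how the rmap code pairs the two
 * accessors above to push a reverse-mapping entry onto a page's list. The
 * Next/Process/Address fields follow MM_RMAP_ENTRY, but this sketch is not
 * the actual MmInsertRmap implementation. Note that each accessor takes the
 * PFN lock internally, so the read-modify-write below relies on the
 * caller's higher-level synchronization.
 */
#if 0
static VOID MiExampleInsertRmap(PFN_NUMBER Page,
                                PEPROCESS Process,
                                PVOID Address,
                                PMM_RMAP_ENTRY NewEntry)
{
    /* Link the new entry in front of the current list head */
    NewEntry->Process = Process;
    NewEntry->Address = Address;
    NewEntry->Next = MmGetRmapListHeadPage(Page);

    /* Publish the new head; the page must still be active */
    MmSetRmapListHeadPage(Page, NewEntry);
}
#endif
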
VOID
NTAPI
MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
{
KIRQL oldIrql;
PMMPFN Pfn1;
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
oldIrql = MiAcquirePfnLock();
Pfn1->u1.SwapEntry = SwapEntry;
MiReleasePfnLock(oldIrql);
}

SWAPENTRY
NTAPI
MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
{
SWAPENTRY SwapEntry;
KIRQL oldIrql;
PMMPFN Pfn1;
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
oldIrql = MiAcquirePfnLock();
SwapEntry = Pfn1->u1.SwapEntry;
MiReleasePfnLock(oldIrql);
return SwapEntry;
}
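
/*
 * Illustrative only (not compiled): the intended pairing of the two swap
 * entry accessors above. The legacy paging path records a page's pagefile
 * location before eviction and reads it back when the slot must be reused
 * or released; the "release the slot" step is left as a comment since the
 * real pagefile allocator lives elsewhere.
 */
#if 0
static VOID MiExampleSwapRoundTrip(PFN_NUMBER Page, SWAPENTRY Entry)
{
    /* Remember where this page's contents live in the pagefile */
    MmSetSavedSwapEntryPage(Page, Entry);

    /* ... the page is evicted and later brought back ... */

    /* Retrieve and clear the slot when the page is released */
    Entry = MmGetSavedSwapEntryPage(Page);
    if (Entry != 0)
    {
        MmSetSavedSwapEntryPage(Page, 0);
        /* ... hand the slot back to the pagefile allocator ... */
    }
}
#endif
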
VOID
NTAPI
MmReferencePage(PFN_NUMBER Pfn)
{
PMMPFN Pfn1;
DPRINT("MmReferencePage(PysicalAddress %x)\n", Pfn << PAGE_SHIFT);
MI_ASSERT_PFN_LOCK_HELD();
ASSERT(Pfn != 0);
ASSERT(Pfn <= MmHighestPhysicalPage);
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
Pfn1->u3.e2.ReferenceCount++;
}
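
/*
 * Illustrative only (not compiled): unlike most of the legacy accessors in
 * this file, MmReferencePage asserts that the caller already owns the PFN
 * lock (and that the page is already referenced) rather than taking the
 * lock itself, so a correct call site looks like this.
 */
#if 0
static VOID MiExampleAddReference(PFN_NUMBER Page)
{
    KIRQL OldIrql;

    /* The PFN lock must be held across the reference */
    OldIrql = MiAcquirePfnLock();
    MmReferencePage(Page);
    MiReleasePfnLock(OldIrql);
}
#endif
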
ULONG
NTAPI
MmGetReferenceCountPage(PFN_NUMBER Pfn)
{
KIRQL oldIrql;
ULONG RCount;
PMMPFN Pfn1;
DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
oldIrql = MiAcquirePfnLock();
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
RCount = Pfn1->u3.e2.ReferenceCount;
MiReleasePfnLock(oldIrql);
return RCount;
}

BOOLEAN
NTAPI
MmIsPageInUse(PFN_NUMBER Pfn)
{
return MiIsPfnInUse(MiGetPfnEntry(Pfn));
}

VOID
NTAPI
MmDereferencePage(PFN_NUMBER Pfn)
{
PMMPFN Pfn1;
KIRQL OldIrql;
DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
OldIrql = MiAcquirePfnLock();
Pfn1 = MiGetPfnEntry(Pfn);
ASSERT(Pfn1);
ASSERT_IS_ROS_PFN(Pfn1);
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
Pfn1->u3.e2.ReferenceCount--;
if (Pfn1->u3.e2.ReferenceCount == 0)
{
/* Temporarily mark the page as active and valid; we are about to free it */
Pfn1->u3.e1.PageLocation = ActiveAndValid;
/* It's not a ROS PFN anymore */
Pfn1->u4.AweAllocation = FALSE;
/* Bring it back into the free list */
DPRINT("Legacy free: %lx\n", Pfn);
MiInsertPageInFreeList(Pfn);
}
MiReleasePfnLock(OldIrql);
}

PFN_NUMBER
NTAPI
MmAllocPage(ULONG Type)
{
PFN_NUMBER PfnOffset;
PMMPFN Pfn1;
KIRQL OldIrql;
OldIrql = MiAcquirePfnLock();
PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
if (!PfnOffset)
{
KeBugCheck(NO_PAGES_AVAILABLE);
}
DPRINT("Legacy allocate: %lx\n", PfnOffset);
Pfn1 = MiGetPfnEntry(PfnOffset);
Pfn1->u3.e2.ReferenceCount = 1;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
/* This marks the PFN as a ReactOS PFN */
Pfn1->u4.AweAllocation = TRUE;
/* Zero out the extra ReactOS data fields */
Pfn1->u1.SwapEntry = 0;
Pfn1->RmapListHead = NULL;
MiReleasePfnLock(OldIrql);
return PfnOffset;
}
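
/*
 * Illustrative only (not compiled): the legacy page lifecycle implemented
 * above. MmAllocPage hands out a zeroed, ActiveAndValid page with a
 * reference count of 1 (the consumer type is ignored by this
 * implementation), and the matching MmDereferencePage returns the page to
 * the free list once the count drops to zero. MC_USER is the consumer
 * constant from the ReactOS mm headers.
 */
#if 0
static VOID MiExamplePageLifecycle(VOID)
{
    PFN_NUMBER Page;

    /* Comes back zeroed, ActiveAndValid, ReferenceCount == 1 */
    Page = MmAllocPage(MC_USER);

    /* ... use the page: insert rmaps, save a swap entry, etc. ... */

    /* The last dereference frees the page back to the free list */
    MmDereferencePage(Page);
}
#endif
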
/* EOF */