/*
 * COPYRIGHT: See COPYING in the top level directory
 * PROJECT: ReactOS kernel
 * FILE: ntoskrnl/mm/freelist.c
 * PURPOSE: Handle the list of free physical pages
 *
 * PROGRAMMERS: David Welch (welch@cwcom.net)
 *              Robert Bergkvist
 */

/* INCLUDES ****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "ARM3/miarm.h"

#define ASSERT_IS_ROS_PFN(x) ASSERT(MI_IS_ROS_PFN(x) == TRUE);

/* GLOBALS ****************************************************************/
|
2009-06-21 11:57:42 +08:00
|
|
|
|
2014-02-08 23:54:38 +08:00
|
|
|
PMMPFN MmPfnDatabase;
|
2002-05-15 05:19:21 +08:00
|
|
|
|
2010-04-21 06:47:51 +08:00
|
|
|
PFN_NUMBER MmAvailablePages;
|
|
|
|
PFN_NUMBER MmResidentAvailablePages;
|
|
|
|
PFN_NUMBER MmResidentAvailableAtInit;
|
2009-10-31 09:02:35 +08:00
|
|
|
|
|
|
|
SIZE_T MmTotalCommittedPages;
|
|
|
|
SIZE_T MmSharedCommit;
|
|
|
|
SIZE_T MmDriverCommit;
|
|
|
|
SIZE_T MmProcessCommit;
|
|
|
|
SIZE_T MmPagedPoolCommit;
|
[HAL/NDK]
- Make Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt an ULONG, not an UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 21:11:45 +08:00
|
|
|
SIZE_T MmPeakCommitment;
|
2009-10-31 09:02:35 +08:00
|
|
|
SIZE_T MmtotalCommitLimitMaximum;
|
|
|
|
|
[NTOS]: Instead of having an LRU linked list of working set pages, we instead have a bitmap.
Advantage: Pages are only in a linked list when they are NOT active (free/zeroed, for now). This makes the LIST_ENTRY fields usable when a page is active, so we can store data in there. This will make it easier to sync our PFN format with Windows.
Advantage: It's a lot faster to set/clear bits than to do list operations (both still O1 though). Scanning for the bit is a bit slower than parsing a list, on the other hand, so it's a toss.
Disadvantage: We lose LRU, which in theory makes us cannibalize working sets randomly instead of by-usage. However, considering the speed of ReactOS paging, and the effects of canabalizing the WS in the first place, I doubt this is really a problem.
The main point of this is advantage #1 -- making used pages not be on any lists. This will allow us to almost 100% sync the PFN layouts, which will lead to the eventual negation of any temporary disavantages.
svn path=/trunk/; revision=45616
2010-02-19 08:46:35 +08:00
|
|
|
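/*
 * Instead of an LRU linked list of user pages, we keep a bitmap with one
 * bit per PFN: active pages then sit on no list at all, which frees the
 * PFN entry's LIST_ENTRY fields for other data and keeps our PFN layout
 * closer to Windows'. Setting and clearing bits is cheap; the trade-off is
 * that enumeration must scan for set bits and strict LRU ordering is lost.
 */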
static RTL_BITMAP MiUserPfnBitMap;

/* FUNCTIONS *************************************************************/

VOID
NTAPI
MiInitializeUserPfnBitmap(VOID)
{
    PVOID Bitmap;

    /* Allocate enough buffer for the PFN bitmap and align it on 32-bits */
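    /* (For example, MmHighestPhysicalPage = 0x1FFFF -- 512 MB of RAM --
        means 0x20000 bits, i.e. 0x1000 ULONGs, a 16 KB allocation.) */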
    Bitmap = ExAllocatePoolWithTag(NonPagedPool,
                                   (((MmHighestPhysicalPage + 1) + 31) / 32) * 4,
                                   TAG_MM);
    ASSERT(Bitmap);

    /* Initialize it and clear all the bits to begin with */
    RtlInitializeBitMap(&MiUserPfnBitMap,
                        Bitmap,
                        (ULONG)MmHighestPhysicalPage + 1);
    RtlClearAllBits(&MiUserPfnBitMap);
}

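/*
 * Callers enumerate user pages by walking the bitmap one set bit at a
 * time. An illustrative loop (a sketch, not code from this file; PFN 0 is
 * never a user page, so 0 doubles as the end-of-scan sentinel):
 *
 *     PFN_NUMBER Page;
 *     for (Page = MmGetLRUFirstUserPage();
 *          Page != 0;
 *          Page = MmGetLRUNextUserPage(Page))
 *     {
 *         // Examine the user page here
 *     }
 */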
PFN_NUMBER
NTAPI
MmGetLRUFirstUserPage(VOID)
{
    ULONG Position;
    KIRQL OldIrql;

    /* Find the first user page */
    OldIrql = MiAcquirePfnLock();
    Position = RtlFindSetBits(&MiUserPfnBitMap, 1, 0);
    MiReleasePfnLock(OldIrql);
    if (Position == 0xFFFFFFFF) return 0;

    /* Return it */
    ASSERT(Position != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
    return Position;
}

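/*
 * MmInsertLRULastUserPage and MmRemoveLRUUserPage form a strict pair: a
 * page's bit must be clear when the page is inserted and set when it is
 * removed, which the asserts in each function enforce; the bit flip itself
 * happens under the PFN lock.
 */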
VOID
NTAPI
MmInsertLRULastUserPage(PFN_NUMBER Pfn)
{
    KIRQL OldIrql;

    /* Set the page as a user page */
    ASSERT(Pfn != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Pfn));
    ASSERT(!RtlCheckBit(&MiUserPfnBitMap, (ULONG)Pfn));
    OldIrql = MiAcquirePfnLock();
    RtlSetBit(&MiUserPfnBitMap, (ULONG)Pfn);
    MiReleasePfnLock(OldIrql);
}

PFN_NUMBER
NTAPI
MmGetLRUNextUserPage(PFN_NUMBER PreviousPfn)
{
    ULONG Position;
    KIRQL OldIrql;

    /* Find the next user page */
    OldIrql = MiAcquirePfnLock();
    Position = RtlFindSetBits(&MiUserPfnBitMap, 1, (ULONG)PreviousPfn + 1);
    MiReleasePfnLock(OldIrql);
    if (Position == 0xFFFFFFFF) return 0;

    /* Return it */
    ASSERT(Position != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Position));
    return Position;
}

VOID
NTAPI
MmRemoveLRUUserPage(PFN_NUMBER Page)
{
    KIRQL OldIrql;

    /* Unset the page as a user page */
    ASSERT(Page != 0);
    ASSERT_IS_ROS_PFN(MiGetPfnEntry(Page));
    ASSERT(RtlCheckBit(&MiUserPfnBitMap, (ULONG)Page));
    OldIrql = MiAcquirePfnLock();
    RtlClearBit(&MiUserPfnBitMap, (ULONG)Page);
    MiReleasePfnLock(OldIrql);
}

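/*
 * Free, zeroed and standby pages live on linked page lists, so an unused
 * PFN entry has its u1.Flink/u2.Blink links in use and a zero reference
 * count; once a page becomes active it is unlinked, freeing those fields
 * for other data.
 */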
BOOLEAN
NTAPI
MiIsPfnFree(IN PMMPFN Pfn1)
{
    /* Must be a free or zero page, with no references, linked */
    return ((Pfn1->u3.e1.PageLocation <= StandbyPageList) &&
            (Pfn1->u1.Flink) &&
            (Pfn1->u2.Blink) &&
            !(Pfn1->u3.e2.ReferenceCount));
}

BOOLEAN
NTAPI
MiIsPfnInUse(IN PMMPFN Pfn1)
{
    /* Standby list or higher, unlinked, and with references */
    return !MiIsPfnFree(Pfn1);
}

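/*
 * Common worker for MmAllocatePagesForMdl and MmAllocatePagesForMdlEx:
 * builds an MDL describing up to TotalBytes worth of physical pages taken
 * from the [LowAddress, HighAddress] range. SkipBytes is not actually
 * supported (see the DPRINT warning below) and the cache attribute is
 * currently ignored.
 */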
PMDL
NTAPI
MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes,
                      IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute,
                      IN ULONG MdlFlags)
{
    PMDL Mdl;
    PFN_NUMBER PageCount, LowPage, HighPage, SkipPages, PagesFound = 0, Page;
    PPFN_NUMBER MdlPage, LastMdlPage;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    INT LookForZeroedPages;
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);

    DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n",
            LowAddress.QuadPart, HighAddress.QuadPart, SkipBytes.QuadPart,
            TotalBytes, CacheAttribute, MdlFlags);

    //
    // Convert the low address into a PFN
    //
    LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT);

    //
    // Convert, and normalize, the high address into a PFN
    //
    HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT);
    if (HighPage > MmHighestPhysicalPage) HighPage = MmHighestPhysicalPage;

    //
    // Validate skipbytes and convert them into pages
    //
    if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
    SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT);

    /* This isn't supported at all */
    if (SkipPages) DPRINT1("WARNING: Caller requesting SkipBytes, MDL might be mismatched\n");
|
|
|
|
|
- Major rewrite of the Memory Descriptor List (MDL) implementation (moving it towards using System PTEs).
- MmCreateMdl, MmSizeOfMdl: No change.
- MmBuildMdlForNonPagedPool: Do not use MmGetPfnForProcess, just normal PMMPTE manipulation.
- This used to cause issues in certain scenarios, because in ReactOS, nonpaged pool, a resident and guaranteed resource, does not always have its PDEs mapped!
- By calling MmGetPfnForProcess, this wound up in the annals of ReactOS mm code, which lazily remapped the PDE. We detected this issue specifically in the cache manager and fixed it there. It should not appear anywhere else.
- MmAllocatePagesForMdl, MmAllocatePagesForMdlEx, MmFreePagesFromMdl:
- The *Ex function is now implemented.
- Allocating pages now uses MiAllocatePagesForMdl, which is based on the older MmAllocPagesSpecifyRange.
- The code is cleaner, better commented, and better handles partial MDLs.
- Cache flags are still ignored (so the Ex functionality isn't really there).
- MmMapLockedPages, MmMapLockedPagesSpecifyCache, MmUnmapLockedPages:
- These functions now use System PTEs for the mappings, instead of the hacked-up "MDL Mapping Space".
- This frees up 256MB of Kernel Virtual Address Space.
- Takes advantage of all System PTE functionality.
- Once again, optimizations in the System PTE code will be felt here.
- For user-space mappings, however, the old code is still kept and used.
- MiMapLockedPagesInUserSpace and MiUnMapLockedPagesInUserSpace are now in virtual.c and provide this.
- MmProbeAndLockPages, MmUnlockPages:
- The pages are actually probed now, in SEH. This did not seem to happen before (did someone misread the function's name?).
- Probing for write is only done for write access to user pages (as documented).
- We do not probe/check for write access for kernel requests (we force Operation to be IoReadAccess).
- Proper locking is used now: the address space lock for user mappings, the PFN lock for kernel mappings.
- Faulting in pages (to make them available before locking) is now done outside the address space/PFN lock.
- You don't want to be holding a spinlock/mutex while doing disk I/O!
- For write/modify access, if the PTE is not writable, fail the request, since the PTE protection overrides.
- However, if the PTE is writable but also copy-on-write, then we'll fault the page in for write access, which is a legitimate operation for certain user-mode scenarios.
- The old version always provided the copy-on-write behavior, even for non-copy-on-write pages!
- Reference and lock every valid page that has a PFN entry (non-I/O pages).
- The older code did not seem to lock pages that had to be faulted in (weren't already valid).
- Clean up the cleanup code (no pun intended). Because we now mark the pages as locked early on, and because of changes in MmUnlockPages, we can simply use MmUnlockPages in case of error, since it will be able to fully back out any references/locks that we took.
- Previous code attempted to do this on its own, in a pretty inconsistent manner, which would leave page leaks (both in references and lock count).
- In MmUnlockPages, not as many changes, but we now:
- Still make sure that an I/O mapping MDL doesn't have valid PFN database pages (non-I/O).
- An MDL can cover pages that are both I/O mapped and RAM mapped, so we have to unlock/dereference the latter instead of skipping them as the old code did.
- Use the PFN lock when checking pages and unlocking/dereferencing them.
- Overall, incomplete MDLs are now marked by having a -1 PFN, and the MDL code has been updated to break out of page-scanning loops early and/or ignore such pages, which can happen in a sparse MDL.
- The implementation has been tested on VMware and QEMU for a variety of tasks and was found to be reliable and stable.
svn path=/trunk/; revision=41707
2009-06-30 16:29:22 +08:00
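A minimal sketch of the probe-in-SEH pattern described above, under stated assumptions (StartAddress, EndAddress, Operation, and Status as local names; the pseh _SEH2_* macros used throughout the tree); this is not the literal MmProbeAndLockPages body:

_SEH2_TRY
{
    PUCHAR Address = (PUCHAR)PAGE_ALIGN(StartAddress);
    while (Address < (PUCHAR)EndAddress)
    {
        /* Touching the byte faults the page in (done outside any spinlock) */
        volatile UCHAR Byte = *Address;

        /* Write-probe only when write access to user pages was requested */
        if (Operation != IoReadAccess) *Address = Byte;

        Address += PAGE_SIZE;
    }
}
_SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
    /* A bad user address becomes a clean failure instead of a bugcheck */
    Status = _SEH2_GetExceptionCode();
}
_SEH2_END;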
//
// Now compute the number of pages the MDL will cover
//
PageCount = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes);
do
{
    //
    // Try creating an MDL for this many pages
    //
    Mdl = MmCreateMdl(NULL, NULL, PageCount << PAGE_SHIFT);
    if (Mdl) break;
    //
    // This function is not required to return the number of pages requested.
    // In fact, it can return as little as 1 page, and callers are supposed
    // to deal with this scenario. So re-attempt the allocation with fewer
    // pages than before, and see if it worked this time.
    //
    PageCount -= (PageCount >> 4);
} while (PageCount);
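Because the function may return fewer pages than requested (as the comment above notes), a hypothetical caller would check the MDL's byte count before using it; a sketch, assuming a driver-side NTSTATUS context:

Mdl = MmAllocatePagesForMdl(LowAddress, HighAddress, SkipBytes, TotalBytes);
if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
if (MmGetMdlByteCount(Mdl) < TotalBytes)
{
    /* Partial allocation: retry for the remainder, or make do with less */
}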
//
// Wow, not even a single page was around!
//
if (!Mdl) return NULL;
//
// This is where the page array starts....
//
MdlPage = (PPFN_NUMBER)(Mdl + 1);
//
// Lock the PFN database
//
OldIrql = MiAcquirePfnLock();
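The matching release (assumed pairing, mirroring the acquire above) happens once the PFN scan completes:

MiReleasePfnLock(OldIrql);  /* restores the pre-acquire IRQL */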
//
// Are we looking for any pages, without discriminating?
//
if ((LowPage == 0) && (HighPage == MmHighestPhysicalPage))
{
    //
    // Well then, let's go shopping
    //
    while (PagesFound < PageCount)
    {
        /* Grab a page */
        MI_SET_USAGE(MI_USAGE_MDL);
        MI_SET_PROCESS2("Kernel");

        /* FIXME: This check should be smarter */
        Page = 0;
        if (MmAvailablePages != 0)
            Page = MiRemoveAnyPage(0);

        if (Page == 0)
        {
            /* This is not good... hopefully we have at least SOME pages */
            ASSERT(PagesFound);
            break;
        }

        /* Grab the page entry for it */
        Pfn1 = MiGetPfnEntry(Page);
        //
        // Make sure it's really free
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount == 0);

        /* Now setup the page and mark it */
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        MI_SET_PFN_DELETED(Pfn1);
        Pfn1->u4.PteFrame = 0x1FFEDCB;
        Pfn1->u3.e1.StartOfAllocation = 1;
        Pfn1->u3.e1.EndOfAllocation = 1;
        Pfn1->u4.VerifierAllocation = 0;
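        /* Note (an assumption from usage, not stated by the commits):
           0x1FFEDCB serves as a recognizable sentinel PteFrame for
           MDL-owned pages, which have no owning page table until a
           caller actually maps them. */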
        //
        // Save it into the MDL
        //
        *MdlPage++ = MiGetPfnEntryIndex(Pfn1);
        PagesFound++;
    }
}
else
{
    //
    // You want a specific range of pages. We'll do this in two runs
    //
    for (LookForZeroedPages = 1; LookForZeroedPages >= 0; LookForZeroedPages--)
    {
        //
        // Scan the range you specified
        //
        for (Page = LowPage; Page < HighPage; Page++)
        {
            //
            // Get the PFN entry for this page
            //
            Pfn1 = MiGetPfnEntry(Page);
- This is a HIGH RISK patch. It has been tested on multiple emulators and configurations but requires broader input.
- Implement several changes to PFN database management:
- The PTEs for the PFN database are now created by ARM3. Unlike the old code, which created a PTE for every page on the machine, ARM3 only creates PTEs to account for pages that should be in the PFN database.
- A second, related change is which pages "should be in the PFN database". Previously, reserved or otherwise non-existent (i.e. holes) memory regions would get a PFN entry created and marked as "BIOS". This is wasteful and not compatible with Windows: no PFN entries should be created for them at all.
- So we removed BIOS PFN entries, and now only create PTEs for valid pages as listed in the physical memory ranges.
- This allows machines with "holes" in their physical address space not to waste dozens of MB of nonpaged pool.
- It also saves memory on regular machines, since 1-4MB worth of memory will now not be in the database anymore.
- To keep track of pages that are invalid/unknown/ignored, there is now a "PFN bitmap". This bitmap has one bit set for each valid PFN in the database.
- And so, MiGetPfnEntry now also validates that, if there is a PFN bitmap, the requested PFN is actually present in the database.
- This introduces a major functional change: device pages, reserved pages, and other BIOS pages cannot be referenced, shared, or managed in any meaningful way.
- We have attempted to fix the parts of the OS that depended on this, but there may still be bugs.
- A known issue may be an assertion during reboot and/or shutdown in the hyperspace mapping function. It is currently safe to simply "cont" in the debugger a couple of times.
- We are working on a fix.
svn path=/trunk/; revision=42220
2009-07-26 05:35:31 +08:00
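A hedged sketch of the lookup validation described above (the shape is assumed from the description; MiPfnBitMap is taken to be the RTL_BITMAP with one bit per valid PFN):

FORCEINLINE
PMMPFN
MiGetPfnEntrySketch(IN PFN_NUMBER Pfn)
{
    /* Out-of-range PFNs never have a database entry */
    if (Pfn > MmHighestPhysicalPage) return NULL;

    /* With a PFN bitmap present, the PFN must be marked valid in it */
    if (MiPfnBitMap.Buffer) ASSERT(RtlTestBit(&MiPfnBitMap, (ULONG)Pfn));

    /* Only then index into the database */
    return &MmPfnDatabase[Pfn];
}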
            ASSERT(Pfn1);
            //
            // Make sure it's free and if this is our first pass, zeroed
            //
            if (MiIsPfnInUse(Pfn1)) continue;
            if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) != LookForZeroedPages) continue;

            /* Remove the page from the free or zero list */
[NTOS]: Implement MiDecrementReferenceCount and rewrite large parts of the ProbeAndLock/Unlock MDL API to fully use ARM3 APIs, leaving MmReference/DereferencePage behind.
[NTOS]: Fix many MDL API bugs: correctly check for I/O pages, use LIST_HEAD instead of -1, track system-wide locked pages, use the process working set lock instead of the address space lock, add a check for cross-ring MDL mappings, and make some small optimizations.
[NTOS]: Make some more fixes in MmAllocatePagesForMdl and MmFreeMdlPages to make the PFN entries more "correct".
[NTOS]: Had a little breakthrough: instead of complicating our lives and hiding certain ReactOS-Mm fields inside legitimate ARM3/MMPFN fields, differentiate between "legacy" (RosMm) and ARM3 pages. The legacy allocator (MmAllocPage/MmRequestPageMemoryConsumer) will use the nonpaged pool to allocate an MMROSPFN add-on (8 bytes), in which the RMAP list head and SWAPENTRY are stored. When a legacy "free" is done, this data is deleted. Additionally, we can now tell apart ARM3 and RosMm pages, so appropriate ASSERTs have been added to make sure the two never cross paths (which should safely let us use all the PFN fields now and implement working sets, etc...). I don't know why I didn't think of this sooner.
svn path=/trunk/; revision=49201
2010-10-19 12:30:48 +08:00
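The add-on described above, sketched under the stated layout (an RMAP list head plus a swap entry; exact field names are assumptions for illustration):

typedef struct _MMROSPFN
{
    PMM_RMAP_ENTRY RmapListHead;   /* legacy reverse-mapping list for the page */
    SWAPENTRY SwapEntry;           /* pagefile slot when the page is swapped out */
} MMROSPFN, *PMMROSPFN;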
            ASSERT(Pfn1->u3.e1.ReadInProgress == 0);
            MI_SET_USAGE(MI_USAGE_MDL);
            MI_SET_PROCESS2("Kernel");
            MiUnlinkFreeOrZeroedPage(Pfn1);
            //
            // Sanity checks
            //
            ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
            //
            // Now setup the page and mark it
            //
            Pfn1->u3.e2.ReferenceCount = 1;
            Pfn1->u2.ShareCount = 1;
            MI_SET_PFN_DELETED(Pfn1);
            Pfn1->u4.PteFrame = 0x1FFEDCB;
            Pfn1->u3.e1.StartOfAllocation = 1;
            Pfn1->u3.e1.EndOfAllocation = 1;
Pfn1->u4.VerifierAllocation = 0;
|
|
|
|
|
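Taken together, the assignments above give every granted page a self-contained PFN state; condensed into one place (the helper name is hypothetical):

    VOID
    MiSetUpMdlPfn_Sketch(IN PMMPFN Pfn1)
    {
        Pfn1->u3.e2.ReferenceCount = 1;     // one reference: the MDL itself
        Pfn1->u2.ShareCount = 1;            // no PTE shares this page yet
        MI_SET_PFN_DELETED(Pfn1);           // there is no PTE to point back to
        Pfn1->u4.PteFrame = 0x1FFEDCB;      // bogus containing-frame marker
        Pfn1->u3.e1.StartOfAllocation = 1;  // each page is treated as its
        Pfn1->u3.e1.EndOfAllocation = 1;    //   own one-page allocation
        Pfn1->u4.VerifierAllocation = 0;    // not a Driver Verifier allocation
    }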
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
// Save this page into the MDL
|
|
|
|
//
|
|
|
|
*MdlPage++ = Page;
|
|
|
|
if (++PagesFound == PageCount) break;
|
|
|
|
}
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
// If the first pass was enough, don't keep going; otherwise, go again
|
|
|
|
//
|
|
|
|
if (PagesFound == PageCount) break;
|
|
|
|
}
|
|
|
|
}
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
// Now release the PFN lock
|
|
|
|
//
|
2017-11-22 06:33:42 +08:00
|
|
|
MiReleasePfnLock(OldIrql);
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
// We might've found fewer pages, but not more ;-)
|
|
|
|
//
|
|
|
|
if (PagesFound != PageCount) ASSERT(PagesFound < PageCount);
|
|
|
|
if (!PagesFound)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// If we didn't find any pages at all, fail
|
|
|
|
//
|
|
|
|
DPRINT1("NO MDL PAGES!\n");
|
2012-10-07 05:42:19 +08:00
|
|
|
ExFreePoolWithTag(Mdl, TAG_MDL);
|
2009-06-30 16:29:22 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
// Write out how many pages we found
|
|
|
|
//
|
|
|
|
Mdl->ByteCount = (ULONG)(PagesFound << PAGE_SHIFT);
|
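Worked out, with the usual 4 KB pages (PAGE_SHIFT == 12): finding 3 of a requested 8 pages gives ByteCount = 3 << 12 = 0x3000 (12288 bytes), so callers see the granted size, not the requested one.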
2011-12-26 02:21:05 +08:00
|
|
|
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
// Terminate the MDL array if some pages are missing
|
|
|
|
//
|
2010-10-19 12:30:48 +08:00
|
|
|
if (PagesFound != PageCount) *MdlPage = LIST_HEAD;
|
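In miniature, for a request of 4 pages where only 2 were found, the page array behind the MDL ends up as below (page numbers are made up; LIST_HEAD is the all-ones "no page" sentinel from the notes above):

    MdlPage[0] = 0x1A2B;       // first granted page
    MdlPage[1] = 0x1A2C;       // second granted page
    MdlPage[2] = LIST_HEAD;    // terminator: scan loops stop here
    // MdlPage[3] is never examined once the terminator is seen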
2011-12-26 02:21:05 +08:00
|
|
|
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
// Now go back and loop over all the MDL pages
|
|
|
|
//
|
|
|
|
MdlPage = (PPFN_NUMBER)(Mdl + 1);
|
|
|
|
LastMdlPage = MdlPage + PagesFound;
|
|
|
|
while (MdlPage < LastMdlPage)
|
|
|
|
{
|
|
|
|
//
|
|
|
|
// Check if we've reached the end
|
|
|
|
//
|
2009-12-04 01:02:34 +08:00
|
|
|
Page = *MdlPage++;
|
2010-10-19 12:30:48 +08:00
|
|
|
if (Page == LIST_HEAD) break;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
// Get the PFN entry for the page and check if we should zero it out
|
|
|
|
//
|
|
|
|
Pfn1 = MiGetPfnEntry(Page);
|
- This is a HIGH RISK patch. It has been tested on multiple emulators and configurations but requires broader input.
- Implement several changes to PFN database management:
- The PTEs for the PFN Database are now created by ARM3. Unlike the old code, which created a PTE for every page on the machine, ARM3 only creates PTEs to account for pages that should be in the PFN database.
- A second, related change is which pages should be in the PFN database. Previously, reserved or otherwise non-existent memory regions (i.e. holes) would get a PFN entry created and marked as "BIOS". This is wasteful and not compatible with Windows: no PFN entries should be created for such pages at all.
- So we removed BIOS PFN entries, and now only create PTEs for valid pages as listed in the physical memory ranges.
- This allows machines with "holes" in their physical address space not to waste dozens of MBs of nonpaged pool
- Also saves memory on regular machines too, since 1-4MB worth of memory will now not be in the DB anymore
- To keep track of pages that are invalid/unknown/ignored, there is now a "PFN Bitmap". This bitmap has one bit set for each valid PFN in the database.
- And so, MiGetPfnEntry now also validates that, if there is a PFN Bitmap, the requested PFN is actually present in the database.
- This introduces a major functional change: device pages, reserved pages, and other BIOS pages cannot be referenced, shared, or managed in any meaningful way.
- We have attempted to fix parts of the OS that depended on this, but there may still be bugs.
- A known issue may be an assertion during reboot and/or shutdown in the hyperspace mapping function. It is currently safe to simply "cont" in the debugger a couple of times.
- We are working on a fix.
svn path=/trunk/; revision=42220
2009-07-26 05:35:31 +08:00
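A sketch of the lookup validation that note describes, with simplified details (the bitmap variable name is an assumption here; the real inline also rejects PFNs beyond the highest physical page):

    FORCEINLINE
    PMMPFN
    MiGetPfnEntry(IN PFN_NUMBER Pfn)
    {
        extern RTL_BITMAP MiPfnBitMap;

        //
        // Once the PFN bitmap exists, only PFNs with their bit set have
        // a database entry; holes and BIOS ranges no longer have one
        //
        if ((MiPfnBitMap.Buffer) &&
            !(RtlTestBit(&MiPfnBitMap, (ULONG)Pfn)))
        {
            return NULL;
        }

        return &MmPfnDatabase[Pfn];
    }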
|
|
|
ASSERT(Pfn1);
|
2010-09-28 05:58:54 +08:00
|
|
|
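//
// Callers expect zero-filled pages (stale contents must never leak),
// so anything not taken off the zeroed list is scrubbed here
//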
if (Pfn1->u3.e1.PageLocation != ZeroedPageList) MiZeroPhysicalPage(Page);
|
2010-02-12 02:44:28 +08:00
|
|
|
Pfn1->u3.e1.PageLocation = ActiveAndValid;
|
2009-06-30 16:29:22 +08:00
|
|
|
}
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
2010-11-02 22:46:46 +08:00
|
|
|
// We're done, mark the pages as locked
|
2009-06-30 16:29:22 +08:00
|
|
|
//
|
|
|
|
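//
// These are fresh physical pages, not probed user pages, so there
// is no owning process to associate with the MDL
//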
Mdl->Process = NULL;
|
[HAL/NDK]
- Make the Vector parameter in HalEnableSystemInterrupt, HalDisableSystemInterrupt and HalBeginSystemInterrupt a ULONG, not a UCHAR
[NDK]
- 64bit fixes for HANDLE_TABLE, KPROCESS, SECTION_IMAGE_INFORMATION, MMADDRESS_LIST, MMVAD_FLAGS, MMVAD, MMVAD_LONG, MMVAD_SHORT, MEMORY_DESCRIPTOR, MEMORY_ALLOCATION_DESCRIPTOR, LdrVerifyMappedImageMatchesChecksum
- KDPC_DATA::DpcQueueDepth is signed on amd64, unsigned on x86
[NTOSKRNL]
- Fix hundreds of MSVC and amd64 warnings
- add a pragma message to FstubFixupEfiPartition, since it looks broken
- Move portable Ke constants from <arch>/cpu.c to krnlinit.c
- Fixed a bug in amd64 KiGeneralProtectionFaultHandler
svn path=/trunk/; revision=53734
2011-09-18 21:11:45 +08:00
|
|
|
Mdl->MdlFlags |= MDL_PAGES_LOCKED;
|
- Major rewrite of Memory Descriptor List (MDL) implementation (moving it towards using System PTEs).
- MmCreateMdl, MmSizeOfMdl: No Change.
- MmBuildMdlForNonPagedPool: Do not use MmGetPfnForProcess, just normal PMMPTE manipulation.
- This seems to cause issues in certain scenarios, because in ReactOS, nonpaged pool, a resident and guaranteed resources, does not always have its PDEs mapped!
- By calling MmGetPfnForProcess, this wound up in the annals of ReactOS mm code, which lazy-remapped the PDE. We detected this issue specifically in the cache manager, and fixed it there. It should not appear anywhere else.
- MmAllocatePagesForMdl, MmAllocatePagesForMdlEx, MmFreePagesFromMdl:
- The *Ex function is now implemented.
- Allocating pages now uses MiAllocatePagesForMdl, which is based on the older MmAllocPagesSpecifyRange.
- The code is cleaner, better commented, and better handles partial MDLs.
- Cache flags are still ignored (so the Ex functionality isn't really there).
- MmMapLockedPages, MmMapLockedPagesSpecifyCache, MmUnmapLockedPages:
- These functions now use System PTEs for the mappings, instead of the hacked-up "MDL Mapping Space".
- This frees up 256MB of Kernel Virtual Address Space.
- Takes advantage of all System PTE functionality.
- Once again, optimizations in the System PTE code will be felt here.
- For user-space mappings however, the old code is still kept and used.
- MiMapLockedPagesInUserSpace and MiUnMapLockedPagesInUserSpace are now in virtual.c and provide this.
- MmProbeAndLockPages, MmUnlockPages:
- The pages are actually probed now, in SEH. This did not seem to happen before (did someone misread the function's name?)
- Probe for write is only done for write access to user pages (as documented).
- We do not probe/check for write access for kernel requests (force Operation to be IoReadAccess).
- Proper locking is used now: Address Space lock for user mappings, PFN lock for kernel mappings.
- Faulting in pages (to make them available before locking) is now done outside the address space/PFN lock.
- You don't want to be holding a spinlock/mutex while doing disk I/O!
- For write/modify access, if the PTE is not writable, fail the request since the PTE protection overrides.
- However, if the PTE is writable but also copy on write, then we'll fault the page in for write access, which is a legitimate operation for certain user-mode scenarios.
- The old version always provided the CopyOnWrite behavior, even for non-CopyOnWrite pages!
- Reference and lock every valid page that has a PFN entry (non-I/O Pages).
- The older code did not seem to lock pages that had to be faulted in (weren't already valid).
- Clean up the cleanup code (no pun intended). Because we now mark the pages as locked early on, and because of changes in MmUnlockPages, we can simply use MmUnlockPages in case of error, since it will be able to fully back out any references/locks that we took.
- Previous code attempted to do this on its own, in a pretty inconsistent manner, which would leave page leaks (both in references and lock count).
- In MmUnlockPages, not as many changes, but we now:
- Still make sure that an I/O Mapping MDL doesn't have valid PFN database pages (non-I/O).
- An MDL can cover pages that are both I/O mapped and RAM mapped, so we have to unlock/dereference the latter instead of skipping them as the old code did.
- Use the PFN lock when checking pages and unlocking/dereferencing them.
- Overall, non-complete MDLs are now marked by having a -1 PFN, and the MDL code has been updated to early-break out of page-scanning loops and/or ignore such pages, which can happen in a sparse MDL.
- Implementation has been tested on VMware and QEMU for a variety of tasks and was found to be reliable and stable.
svn path=/trunk/; revision=41707
2009-06-30 16:29:22 +08:00
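To make the probing change above concrete, here is a minimal sketch of the described approach: touch every page of the buffer inside SEH, write-probing only when write access to user pages was requested. Function and variable names are local to this sketch (it is not the actual MmProbeAndLockPages body), and the pseh2 macros used throughout ReactOS are assumed:

/* Hedged sketch of SEH-based probing: read-touch each page, and
 * write-touch only when the caller asked for write access. */
static NTSTATUS
MipSketchProbePages(IN PVOID BaseAddress,
                    IN SIZE_T Length,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    volatile UCHAR *Address, *LastAddress;
    UCHAR Byte;
    NTSTATUS Status = STATUS_SUCCESS;

    /* Kernel requests are forced to read access, as described above */
    if (AccessMode == KernelMode) Operation = IoReadAccess;

    Address = (volatile UCHAR *)PAGE_ALIGN(BaseAddress);
    LastAddress = (volatile UCHAR *)BaseAddress + Length;

    _SEH2_TRY
    {
        while (Address < LastAddress)
        {
            /* Fault the page in (read probe) */
            Byte = *Address;

            /* Write probe only when write access was requested */
            if (Operation != IoReadAccess) *Address = Byte;

            Address += PAGE_SIZE;
        }
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* The real code can then back out via MmUnlockPages */
        Status = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    return Status;
}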
|
|
|
return Mdl;
|
|
|
|
}
|
|
|
|
|
2004-04-11 06:36:07 +08:00
|
|
|
VOID
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
[NTOS]: Implement MiDecrementReferenceCount and rewrite large parts of the ProbeAndLock/Unlock MDL API to fully use ARM3 APIs, leaving MmReference/DereferencePage behind.
[NTOS]: Fix many MDL API bugs: correctly check for I/O pages, use LIST_HEAD instead of -1, track system-wide locked pages, use the process working set lock instead of the address space lock, add a check for cross-ring MDL mappings, and make some small optimizations.
[NTOS]: Make some more fixes in MmAllocatePagesForMdl, MmFreeMdlPages to make the PFN entries more "correct".
[NTOS]: Had a little breakthrough: instead of complicating our lives and hiding certain ReactOS-Mm fields inside legitimate ARM3/MMPFN fields, differentiate between "legacy" (RosMm) and ARM3 pages. The legacy allocator (MmAllocPage/MmRequestPageMemoryConsumer) will use the non-paged pool to allocate an MMROSPFN add-on (8 bytes), in which the RMAP list head and SWAPENTRY are stored. When a legacy "free" is done, this data is deleted. Additionally, we can now tell ARM3 and RosMm pages apart, so appropriate ASSERTs have been added to make sure the two never cross paths (which should safely let us use all the PFN fields now and implement working sets, etc...). I don't know why I didn't think of this sooner. (A sketch of this add-on follows MmSetRmapListHeadPage below.)
svn path=/trunk/; revision=49201
2010-10-19 12:30:48 +08:00
|
|
|
MmSetRmapListHeadPage(PFN_NUMBER Pfn, PMM_RMAP_ENTRY ListHead)
|
2001-12-31 09:53:46 +08:00
|
|
|
{
|
[NTOS]: Adding colored page lists means we need to start using the OriginalPte field as a forward/back link. This is shared with AweReferenceCount, which ReactOS uses as the RMAP list head. However, RMAPped pages should never be free/zero, and non-free/zero pages will never have a color backlink in OriginalPte, so it should theoretically be safe to do this. However, it's possible for the RMAP "get" function to be called on a free/zero page (which would normally return NULL), but with color chaining enabled, the "get" function would misinterpret the backlink as an RMAP entry. Therefore, we overload the ParityError bit to signify "there is an RMAP". The get/set functions now handle this, and the color linkage will ASSERT this later. This way, a color link with ParityError == FALSE is not treated as an RMAP list head. (A condensed sketch of this guard follows MmGetRmapListHeadPage below.)
svn path=/trunk/; revision=48910
2010-09-28 01:36:54 +08:00
|
|
|
KIRQL oldIrql;
|
|
|
|
PMMPFN Pfn1;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
oldIrql = MiAcquirePfnLock();
|
2010-09-28 01:36:54 +08:00
|
|
|
Pfn1 = MiGetPfnEntry(Pfn);
|
2010-10-19 12:30:48 +08:00
|
|
|
ASSERT(Pfn1);
|
|
|
|
ASSERT_IS_ROS_PFN(Pfn1);
|
|
|
|
|
2010-09-28 01:36:54 +08:00
|
|
|
if (ListHead)
|
|
|
|
{
|
|
|
|
/* Should not be trying to insert an RMAP for a non-active page */
|
|
|
|
ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2010-09-28 01:36:54 +08:00
|
|
|
/* Set the list head address */
|
2014-02-08 23:54:38 +08:00
|
|
|
Pfn1->RmapListHead = ListHead;
|
2010-09-28 01:36:54 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* ReactOS semantics dictate the page is STILL active right now */
|
|
|
|
ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2010-09-28 01:36:54 +08:00
|
|
|
/* In this case, the RMAP is actually being removed, so clear the field */
|
2014-02-08 23:54:38 +08:00
|
|
|
Pfn1->RmapListHead = NULL;
|
[NTOS]: Implement MiDecrementReferenceCount and rewrite large parts of the ProbeAndLock/Unlock MDL API to fully use ARM3 APIs, dropping MmReference/DereferencePage behind.
[NTOS]: Fix many MDL API bugs: correctly check for I/O pages, use LIST_HEAD instead of -1, track system-wide locked pages, use the process working set lock instead of the address space lock, add check for cross-ring MDL mappings, and make some small optimizations.
[NTOS]: Make some more fixes in MmAllocatePagesForMdl, MmFreeMdlPages to make the PFN entries more "correct".
[NTOS]: Had a little breakthrough: instead of complicating our lives and hiding certain ReactOS-Mm fields inside legitimate ARM3/MMPFN fields, differentiate between "legacy" (RosMm) and ARM3 pages. The legacy allocator (MmAllocPage/MmRequestPageMemoryConsumer) will use the non-paged pool to allocate a MMROSPFN add-on (8 bytes), in which the RMAP list head and SWAPENTRY are stored. When a legacy "free" is done, this data is deleted. Additionally, we can now tell apart between ARM3 and RosMm pages, so appropriate ASSERTs have been added to make sure the two never cross paths (which should safely let us use all the PFN fields now and implement working sets, etc...). I don't know why I didn't think of this sooner.
svn path=/trunk/; revision=49201
2010-10-19 12:30:48 +08:00
|
|
|
|
2010-09-28 01:36:54 +08:00
|
|
|
/* ReactOS semantics will now release the page, which will make it free and enter a colored list */
|
|
|
|
}
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
MiReleasePfnLock(oldIrql);
|
2001-12-31 09:53:46 +08:00
|
|
|
}
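The MMROSPFN add-on described in the MiDecrementReferenceCount commit message above reduces to two legacy fields kept per page. A minimal sketch of that idea follows; the structure name and layout are inferred from the commit text, not copied from a header:

/* Hedged sketch of the legacy (RosMm) per-page add-on described above:
 * a small (~8 byte on x86) nonpaged-pool allocation holding the two
 * fields that previously hid inside legitimate ARM3 MMPFN fields. */
typedef struct _MMROSPFN_SKETCH
{
    PMM_RMAP_ENTRY RmapListHead;   /* reverse-map (RMAP) entries for this page */
    SWAPENTRY SwapEntry;           /* backing swap location, if any */
} MMROSPFN_SKETCH, *PMMROSPFN_SKETCH;

In the code as it now stands in this file, the two fields live directly on the MMPFN (Pfn1->RmapListHead and Pfn1->u1.SwapEntry above), and u4.AweAllocation marks a page as RosMm-owned, which is what ASSERT_IS_ROS_PFN checks.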
|
|
|
|
|
2010-10-19 12:30:48 +08:00
|
|
|
PMM_RMAP_ENTRY
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
2010-07-16 06:50:12 +08:00
|
|
|
MmGetRmapListHeadPage(PFN_NUMBER Pfn)
|
2001-12-31 09:53:46 +08:00
|
|
|
{
|
2010-09-28 01:36:54 +08:00
|
|
|
KIRQL oldIrql;
|
2010-10-19 12:30:48 +08:00
|
|
|
PMM_RMAP_ENTRY ListHead;
|
2010-09-28 01:36:54 +08:00
|
|
|
PMMPFN Pfn1;
|
|
|
|
|
|
|
|
/* Lock PFN database */
|
2017-11-22 06:33:42 +08:00
|
|
|
oldIrql = MiAcquirePfnLock();
|
2010-09-28 01:36:54 +08:00
|
|
|
|
|
|
|
/* Get the entry */
|
|
|
|
Pfn1 = MiGetPfnEntry(Pfn);
|
2010-10-19 12:30:48 +08:00
|
|
|
ASSERT(Pfn1);
|
|
|
|
ASSERT_IS_ROS_PFN(Pfn1);
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2010-10-19 12:30:48 +08:00
|
|
|
/* Get the list head */
|
2014-02-08 23:54:38 +08:00
|
|
|
ListHead = Pfn1->RmapListHead;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2010-09-28 01:36:54 +08:00
|
|
|
/* Should not have an RMAP for a non-active page */
|
|
|
|
ASSERT(MiIsPfnInUse(Pfn1) == TRUE);
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2010-09-28 01:36:54 +08:00
|
|
|
/* Release PFN database and return rmap list head */
|
2017-11-22 06:33:42 +08:00
|
|
|
MiReleasePfnLock(oldIrql);
|
2010-09-28 01:36:54 +08:00
|
|
|
return ListHead;
|
2001-12-31 09:53:46 +08:00
|
|
|
}
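The colored-lists commit message quoted above describes guarding the overloaded link field with the ParityError bit. The functions above no longer show that guard, so here is a condensed sketch of the described logic; the Mip* names are hypothetical, and the PFN locking shown in the real functions is omitted:

/* Hedged sketch of the ParityError guard from the colored-lists commit
 * message: the overloaded field is treated as an RMAP list head only
 * when the bit says one was stored, so a color backlink with
 * ParityError == FALSE is never misread as an RMAP entry. */
static PMM_RMAP_ENTRY
MipSketchGetRmapListHead(IN PMMPFN Pfn1)
{
    if (!Pfn1->u3.e1.ParityError) return NULL;
    return Pfn1->RmapListHead;
}

static VOID
MipSketchSetRmapListHead(IN PMMPFN Pfn1, IN PMM_RMAP_ENTRY ListHead)
{
    /* Record whether an RMAP is present before storing the link */
    Pfn1->u3.e1.ParityError = (ListHead != NULL);
    Pfn1->RmapListHead = ListHead;
}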
|
|
|
|
|
2004-04-11 06:36:07 +08:00
|
|
|
VOID
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
2010-07-16 06:50:12 +08:00
|
|
|
MmSetSavedSwapEntryPage(PFN_NUMBER Pfn, SWAPENTRY SwapEntry)
|
2000-07-07 18:30:57 +08:00
|
|
|
{
|
2014-10-05 14:33:34 +08:00
|
|
|
KIRQL oldIrql;
|
|
|
|
PMMPFN Pfn1;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
Pfn1 = MiGetPfnEntry(Pfn);
|
|
|
|
ASSERT(Pfn1);
|
|
|
|
ASSERT_IS_ROS_PFN(Pfn1);
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
oldIrql = MiAcquirePfnLock();
|
2014-10-05 14:33:34 +08:00
|
|
|
Pfn1->u1.SwapEntry = SwapEntry;
|
2017-11-22 06:33:42 +08:00
|
|
|
MiReleasePfnLock(oldIrql);
|
2000-07-07 18:30:57 +08:00
|
|
|
}
|
|
|
|
|
2004-04-11 06:36:07 +08:00
|
|
|
SWAPENTRY
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
2010-07-16 06:50:12 +08:00
|
|
|
MmGetSavedSwapEntryPage(PFN_NUMBER Pfn)
|
1998-08-25 12:27:26 +08:00
|
|
|
{
|
2014-10-05 14:33:34 +08:00
|
|
|
SWAPENTRY SwapEntry;
|
|
|
|
KIRQL oldIrql;
|
|
|
|
PMMPFN Pfn1;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
Pfn1 = MiGetPfnEntry(Pfn);
|
|
|
|
ASSERT(Pfn1);
|
|
|
|
ASSERT_IS_ROS_PFN(Pfn1);
|
2004-01-05 22:28:21 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
oldIrql = MiAcquirePfnLock();
|
2014-10-05 14:33:34 +08:00
|
|
|
SwapEntry = Pfn1->u1.SwapEntry;
|
2017-11-22 06:33:42 +08:00
|
|
|
MiReleasePfnLock(oldIrql);
|
2002-05-14 02:10:41 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
return SwapEntry;
|
2002-05-14 02:10:41 +08:00
|
|
|
}
|
|
|
|
|
2004-04-11 06:36:07 +08:00
|
|
|
VOID
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
2010-07-16 06:50:12 +08:00
|
|
|
MmReferencePage(PFN_NUMBER Pfn)
|
2002-05-14 02:10:41 +08:00
|
|
|
{
|
2014-10-05 14:33:34 +08:00
|
|
|
PMMPFN Pfn1;
|
2004-01-05 22:28:21 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
DPRINT("MmReferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
|
2003-07-13 22:36:32 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
MI_ASSERT_PFN_LOCK_HELD();
|
2014-10-05 14:33:34 +08:00
|
|
|
ASSERT(Pfn != 0);
|
|
|
|
ASSERT(Pfn <= MmHighestPhysicalPage);
|
2004-01-05 22:28:21 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
Pfn1 = MiGetPfnEntry(Pfn);
|
|
|
|
ASSERT(Pfn1);
|
|
|
|
ASSERT_IS_ROS_PFN(Pfn1);
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
|
|
|
|
Pfn1->u3.e2.ReferenceCount++;
|
2005-03-16 06:07:05 +08:00
|
|
|
}
|
|
|
|
|
2001-02-11 06:51:11 +08:00
|
|
|
ULONG
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
2010-07-16 06:50:12 +08:00
|
|
|
MmGetReferenceCountPage(PFN_NUMBER Pfn)
|
2001-02-11 06:51:11 +08:00
|
|
|
{
|
2014-10-05 14:33:34 +08:00
|
|
|
KIRQL oldIrql;
|
|
|
|
ULONG RCount;
|
|
|
|
PMMPFN Pfn1;
|
2001-02-11 06:51:11 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
DPRINT("MmGetReferenceCountPage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
|
2001-02-11 06:51:11 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
oldIrql = MiAcquirePfnLock();
|
2014-10-05 14:33:34 +08:00
|
|
|
Pfn1 = MiGetPfnEntry(Pfn);
|
|
|
|
ASSERT(Pfn1);
|
|
|
|
ASSERT_IS_ROS_PFN(Pfn1);
|
2004-01-05 22:28:21 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
RCount = Pfn1->u3.e2.ReferenceCount;
|
2002-05-15 05:19:21 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
MiReleasePfnLock(oldIrql);
|
2014-10-05 14:33:34 +08:00
|
|
|
return RCount;
|
2001-02-11 06:51:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
BOOLEAN
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
2010-07-16 06:50:12 +08:00
|
|
|
MmIsPageInUse(PFN_NUMBER Pfn)
|
2001-02-11 06:51:11 +08:00
|
|
|
{
|
2010-02-12 02:44:28 +08:00
|
|
|
return MiIsPfnInUse(MiGetPfnEntry(Pfn));
|
|
|
|
}
|
2001-02-11 06:51:11 +08:00
|
|
|
|
2004-04-11 06:36:07 +08:00
|
|
|
VOID
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
2010-07-16 06:50:12 +08:00
|
|
|
MmDereferencePage(PFN_NUMBER Pfn)
|
1999-12-20 10:14:40 +08:00
|
|
|
{
|
2014-10-05 14:33:34 +08:00
|
|
|
PMMPFN Pfn1;
|
2014-10-08 08:30:50 +08:00
|
|
|
KIRQL OldIrql;
|
2014-10-05 14:33:34 +08:00
|
|
|
DPRINT("MmDereferencePage(PhysicalAddress %x)\n", Pfn << PAGE_SHIFT);
|
2002-05-15 05:19:21 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
OldIrql = MiAcquirePfnLock();
|
2014-10-08 08:30:50 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
Pfn1 = MiGetPfnEntry(Pfn);
|
|
|
|
ASSERT(Pfn1);
|
|
|
|
ASSERT_IS_ROS_PFN(Pfn1);
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
|
|
|
|
Pfn1->u3.e2.ReferenceCount--;
|
|
|
|
if (Pfn1->u3.e2.ReferenceCount == 0)
|
|
|
|
{
|
2010-09-28 00:00:24 +08:00
|
|
|
/* Mark the page temporarily as valid, we're going to make it free soon */
|
2014-02-08 23:54:38 +08:00
|
|
|
Pfn1->u3.e1.PageLocation = ActiveAndValid;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2010-10-19 12:30:48 +08:00
|
|
|
/* It's not a ROS PFN anymore */
|
2014-02-08 23:54:38 +08:00
|
|
|
Pfn1->u4.AweAllocation = FALSE;
|
2010-09-28 00:00:24 +08:00
|
|
|
|
|
|
|
/* Bring it back into the free list */
|
2010-09-30 11:21:02 +08:00
|
|
|
DPRINT("Legacy free: %lx\n", Pfn);
|
2010-09-28 00:00:24 +08:00
|
|
|
MiInsertPageInFreeList(Pfn);
|
2014-10-05 14:33:34 +08:00
|
|
|
}
|
2014-10-08 08:30:50 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
MiReleasePfnLock(OldIrql);
|
1999-03-31 18:59:32 +08:00
|
|
|
}
|
1998-10-05 12:01:30 +08:00
|
|
|
|
2010-07-16 06:50:12 +08:00
|
|
|
PFN_NUMBER
|
2005-09-14 09:05:50 +08:00
|
|
|
NTAPI
|
2010-02-20 22:47:23 +08:00
|
|
|
MmAllocPage(ULONG Type)
|
1999-03-31 18:59:32 +08:00
|
|
|
{
|
2014-10-05 14:33:34 +08:00
|
|
|
PFN_NUMBER PfnOffset;
|
|
|
|
PMMPFN Pfn1;
|
2014-10-08 08:30:50 +08:00
|
|
|
KIRQL OldIrql;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
OldIrql = MiAcquirePfnLock();
|
2004-04-11 06:36:07 +08:00
|
|
|
|
2014-10-08 08:30:50 +08:00
|
|
|
PfnOffset = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
|
2014-10-05 14:33:34 +08:00
|
|
|
if (!PfnOffset)
|
|
|
|
{
|
2014-10-08 08:30:50 +08:00
|
|
|
KeBugCheck(NO_PAGES_AVAILABLE);
|
2014-10-05 14:33:34 +08:00
|
|
|
}
|
2010-09-28 00:00:24 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
DPRINT("Legacy allocate: %lx\n", PfnOffset);
|
|
|
|
Pfn1 = MiGetPfnEntry(PfnOffset);
|
|
|
|
Pfn1->u3.e2.ReferenceCount = 1;
|
|
|
|
Pfn1->u3.e1.PageLocation = ActiveAndValid;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
/* This marks the PFN as a ReactOS PFN */
|
|
|
|
Pfn1->u4.AweAllocation = TRUE;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2014-10-05 14:33:34 +08:00
|
|
|
/* Initialize the extra ReactOS data and zero it out */
|
|
|
|
Pfn1->u1.SwapEntry = 0;
|
|
|
|
Pfn1->RmapListHead = NULL;
|
2011-12-26 02:21:05 +08:00
|
|
|
|
2017-11-22 06:33:42 +08:00
|
|
|
MiReleasePfnLock(OldIrql);
|
2014-10-05 14:33:34 +08:00
|
|
|
return PfnOffset;
|
1998-08-25 12:27:26 +08:00
|
|
|
}
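Taken together, the legacy RosMm API implemented above yields a simple page lifecycle. A hedged usage sketch (illustrative only; the consumer type is left as 0 here, where real callers pass a memory-consumer constant):

/* Hedged usage sketch of the legacy page lifecycle implemented above:
 * allocate a zeroed page, verify its state, then dereference it back
 * onto the free list. */
static VOID
MipSketchLegacyPageLifecycle(VOID)
{
    PFN_NUMBER Page;

    /* Comes back zeroed, with ReferenceCount == 1, marked as a RosMm page */
    Page = MmAllocPage(0);

    /* Freshly allocated pages have no swap entry and are ActiveAndValid */
    ASSERT(MmGetSavedSwapEntryPage(Page) == 0);
    ASSERT(MmIsPageInUse(Page) == TRUE);

    /* Drop the last reference; the page re-enters the free list */
    MmDereferencePage(Page);
}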
|
2003-07-06 18:34:32 +08:00
|
|
|
|
2003-07-11 05:05:04 +08:00
|
|
|
/* EOF */
|