[NTOS]: Perform system space mappings under the SystemSpaceViewLockPointer guarded mutex.

[NTOS]: Expand system space view buckets when they are running low.
[NTOS]: Support SEC_BASED section creation and mapping; implement based VAD parsing.
[NTOS]: Support section mappings at a fixed base address and check for conflicts.
[NTOS]: Define 8 prioritized standby lists and initialize them. Also define the modified page list.
[NTOS]: Support mapping with SEC_COMMIT less than the entire size of the section.
[NTOS]: Detect and assert if an attempt is made to unmap an ARM3 section, since this isn't supported yet.
[NTOS]: Clean up some DPRINTs and clarify ARM3 ASSERTs vs. Windows ASSERTs.

svn path=/trunk/; revision=56232
This commit is contained in:
Sir Richard 2012-03-26 07:26:36 +00:00
parent 3eb0fe31ad
commit 8acc24467a
6 changed files with 462 additions and 50 deletions

View File

@ -26,7 +26,7 @@
MI_SESSION_IMAGE_SIZE + \
MI_SESSION_WORKING_SET_SIZE)
#define MI_SYSTEM_VIEW_SIZE (16 * _1MB)
#define MI_SYSTEM_VIEW_SIZE (32 * _1MB)
#define MI_HIGHEST_USER_ADDRESS (PVOID)0x7FFEFFFF
#define MI_USER_PROBE_ADDRESS (PVOID)0x7FFF0000
@ -526,6 +526,7 @@ extern ULONG_PTR MxPfnAllocation;
extern MM_PAGED_POOL_INFO MmPagedPoolInfo;
extern RTL_BITMAP MiPfnBitMap;
extern KGUARDED_MUTEX MmPagedPoolMutex;
extern KGUARDED_MUTEX MmSectionCommitMutex;
extern PVOID MmPagedPoolStart;
extern PVOID MmPagedPoolEnd;
extern PVOID MmNonPagedSystemStart;
@ -557,6 +558,7 @@ extern ULONG MmNumberOfSystemPtes;
extern ULONG MmMaximumNonPagedPoolPercent;
extern ULONG MmLargeStackSize;
extern PMMCOLOR_TABLES MmFreePagesByColor[FreePageList + 1];
extern MMPFNLIST MmStandbyPageListByPriority[8];
extern ULONG MmProductType;
extern MM_SYSTEMSIZE MmSystemSize;
extern PKEVENT MiLowMemoryEvent;
@ -600,6 +602,9 @@ extern PVOID MiSessionPoolStart; // 0xBD000000
extern PVOID MiSessionViewStart; // 0xBE000000
extern ULONG MmMaximumDeadKernelStacks;
extern SLIST_HEADER MmDeadStackSListHead;
extern MM_AVL_TABLE MmSectionBasedRoot;
extern KGUARDED_MUTEX MmSectionBasedMutex;
extern PVOID MmHighSectionBase;
BOOLEAN
FORCEINLINE
@ -1264,6 +1269,12 @@ MiUnlinkFreeOrZeroedPage(
IN PMMPFN Entry
);
VOID
NTAPI
MiUnlinkPageFromList(
IN PMMPFN Pfn
);
PFN_NUMBER
NTAPI
MiAllocatePfn(
@ -1403,6 +1414,16 @@ MiFindEmptyAddressRangeDownTree(
OUT PMMADDRESS_NODE *Parent
);
NTSTATUS
NTAPI
MiFindEmptyAddressRangeDownBasedTree(
IN SIZE_T Length,
IN ULONG_PTR BoundaryAddress,
IN ULONG_PTR Alignment,
IN PMM_AVL_TABLE Table,
OUT PULONG_PTR Base
);
NTSTATUS
NTAPI
MiFindEmptyAddressRangeInTree(
@ -1420,6 +1441,28 @@ MiInsertVad(
IN PEPROCESS Process
);
VOID
NTAPI
MiInsertBasedSection(
IN PSECTION Section
);
NTSTATUS
NTAPI
MiUnmapViewOfSection(
IN PEPROCESS Process,
IN PVOID BaseAddress,
IN ULONG Flags
);
NTSTATUS
NTAPI
MiRosUnmapViewOfSection(
IN PEPROCESS Process,
IN PVOID BaseAddress,
IN ULONG Flags
);
VOID
NTAPI
MiInsertNode(
@ -1520,12 +1563,26 @@ MiRosAllocateVirtualMemory(
IN ULONG Protect
);
NTSTATUS
NTAPI
MiRosUnmapViewInSystemSpace(
IN PVOID MappedBase
);
POOL_TYPE
NTAPI
MmDeterminePoolType(
IN PVOID PoolAddress
);
VOID
NTAPI
MiMakePdeExistAndMakeValid(
IN PMMPTE PointerPde,
IN PEPROCESS TargetProcess,
IN KIRQL OldIrql
);
//
// MiRemoveZeroPage will use inline code to zero out the page manually if only
// free pages are available. In some scenarios, we don't/can't run that piece of

View File

@ -149,7 +149,7 @@ PMMPTE MiSessionLastPte;
// The system view space, on the other hand, is where sections that are memory
// mapped into "system space" end up.
//
// By default, it is a 16MB region.
// By default, it is a 16MB region, but we hack it to be 32MB for ReactOS
//
PVOID MiSystemViewStart;
SIZE_T MmSystemViewSize;
@ -1134,7 +1134,7 @@ MmFreeLoaderBlock(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* See how many pages are in this run */
i = Entry->PageCount;
BasePage = Entry->BasePage;
/* Loop each page */
Pfn1 = MiGetPfnEntry(BasePage);
while (i--)
@ -2062,12 +2062,27 @@ MmArmInitSystem(IN ULONG Phase,
/* Initialize session space address layout */
MiInitializeSessionSpaceLayout();
/* Set the based section highest address */
MmHighSectionBase = (PVOID)((ULONG_PTR)MmHighestUserAddress - 0x800000);
/* Loop all 8 standby lists */
for (i = 0; i < 8; i++)
{
/* Initialize them */
MmStandbyPageListByPriority[i].Total = 0;
MmStandbyPageListByPriority[i].ListName = StandbyPageList;
MmStandbyPageListByPriority[i].Flink = MM_EMPTY_LIST;
MmStandbyPageListByPriority[i].Blink = MM_EMPTY_LIST;
}
/* Initialize the user mode image list */
InitializeListHead(&MmLoadedUserImageList);
/* Initialize the paged pool mutex */
/* Initialize the paged pool mutex and the section commit mutex */
KeInitializeGuardedMutex(&MmPagedPoolMutex);
KeInitializeGuardedMutex(&MmSectionCommitMutex);
KeInitializeGuardedMutex(&MmSectionBasedMutex);
/* Initialize the Loader Lock */
KeInitializeMutant(&MmSystemLoadLock, FALSE);
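
Side note: with MI_HIGHEST_USER_ADDRESS defined as 0x7FFEFFFF in the header above, the new MmHighSectionBase works out to 0x777EFFFF, i.e. 8MB below the highest user address (assuming MmHighestUserAddress carries that same value at this point). The loop above only initializes the eight prioritized standby list heads; the following standalone, user-mode sketch models that setup with SKETCH_* stand-ins for MMPFNLIST, MM_EMPTY_LIST and StandbyPageList. The priority-indexed lookup at the end is an assumption about how the lists would eventually be consumed, not something this commit implements.

/* Standalone sketch (not ReactOS code): the eight prioritized standby
 * list heads, initialized empty the same way MmArmInitSystem does above. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_EMPTY_LIST ((uint32_t)-1)  /* stand-in for MM_EMPTY_LIST   */
#define SKETCH_STANDBY    2               /* stand-in for StandbyPageList */

typedef struct SKETCH_PFNLIST
{
    uint32_t Total;     /* pages currently on this list               */
    uint32_t ListName;  /* which list this head describes             */
    uint32_t Flink;     /* first PFN on the list, or the empty marker */
    uint32_t Blink;     /* last PFN on the list, or the empty marker  */
} SKETCH_PFNLIST;

static SKETCH_PFNLIST StandbyByPriority[8];

int main(void)
{
    /* Same shape as the initialization loop in the hunk above */
    for (int i = 0; i < 8; i++)
    {
        StandbyByPriority[i].Total = 0;
        StandbyByPriority[i].ListName = SKETCH_STANDBY;
        StandbyByPriority[i].Flink = SKETCH_EMPTY_LIST;
        StandbyByPriority[i].Blink = SKETCH_EMPTY_LIST;
    }

    /* Assumption for illustration only: a page of priority N would be
     * linked onto StandbyByPriority[N]; the commit does not add that yet. */
    uint32_t priority = 5;
    printf("list %u starts empty: %d\n", priority,
           StandbyByPriority[priority].Flink == SKETCH_EMPTY_LIST);
    return 0;
}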

View File

@ -35,10 +35,15 @@ BOOLEAN MmDynamicPfn;
BOOLEAN MmMirroring;
ULONG MmSystemPageColor;
ULONG MmTransitionSharedPages;
ULONG MmTotalPagesForPagingFile;
MMPFNLIST MmZeroedPageListHead = {0, ZeroedPageList, LIST_HEAD, LIST_HEAD};
MMPFNLIST MmFreePageListHead = {0, FreePageList, LIST_HEAD, LIST_HEAD};
MMPFNLIST MmStandbyPageListHead = {0, StandbyPageList, LIST_HEAD, LIST_HEAD};
MMPFNLIST MmStandbyPageListByPriority[8];
MMPFNLIST MmModifiedPageListHead = {0, ModifiedPageList, LIST_HEAD, LIST_HEAD};
MMPFNLIST MmModifiedPageListByColor[1] = {{0, ModifiedPageList, LIST_HEAD, LIST_HEAD}};
MMPFNLIST MmModifiedNoWritePageListHead = {0, ModifiedNoWritePageList, LIST_HEAD, LIST_HEAD};
MMPFNLIST MmBadPageListHead = {0, BadPageList, LIST_HEAD, LIST_HEAD};
MMPFNLIST MmRomPageListHead = {0, StandbyPageList, LIST_HEAD, LIST_HEAD};

View File

@ -82,6 +82,10 @@ CHAR MmUserProtectionToMask2[16] =
};
MMSESSION MmSession;
KGUARDED_MUTEX MmSectionCommitMutex;
MM_AVL_TABLE MmSectionBasedRoot;
KGUARDED_MUTEX MmSectionBasedMutex;
PVOID MmHighSectionBase;
/* PRIVATE FUNCTIONS **********************************************************/
@ -243,19 +247,87 @@ MiInsertInSystemSpace(IN PMMSESSION Session,
IN PCONTROL_AREA ControlArea)
{
PVOID Base;
ULONG Entry, Hash, i;
ULONG Entry, Hash, i, HashSize;
PMMVIEW OldTable;
PAGED_CODE();
/* Only global mappings supported for now */
ASSERT(Session == &MmSession);
/* Stay within 4GB and don't go past the number of hash entries available */
/* Stay within 4GB */
ASSERT(Buckets < MI_SYSTEM_VIEW_BUCKET_SIZE);
ASSERT(Session->SystemSpaceHashEntries < Session->SystemSpaceHashSize);
/* Lock system space */
KeAcquireGuardedMutex(Session->SystemSpaceViewLockPointer);
/* Check if we're going to exhaust hash entries */
if ((Session->SystemSpaceHashEntries + 8) > Session->SystemSpaceHashSize)
{
/* Double the hash size */
HashSize = Session->SystemSpaceHashSize * 2;
/* Save the old table and allocate a new one */
OldTable = Session->SystemSpaceViewTable;
Session->SystemSpaceViewTable = ExAllocatePoolWithTag(NonPagedPool,
HashSize *
sizeof(MMVIEW),
' mM');
if (!Session->SystemSpaceViewTable)
{
/* Failed to allocate a new table, keep the old one for now */
Session->SystemSpaceViewTable = OldTable;
}
else
{
/* Clear the new table and set the new hash size and key */
RtlZeroMemory(Session->SystemSpaceViewTable, HashSize * sizeof(MMVIEW));
Session->SystemSpaceHashSize = HashSize;
Session->SystemSpaceHashKey = Session->SystemSpaceHashSize - 1;
/* Loop the old table */
for (i = 0; i < Session->SystemSpaceHashSize / 2; i++)
{
/* Check if the entry was valid */
if (OldTable[i].Entry)
{
/* Re-hash the old entry and search for space in the new table */
Hash = (OldTable[i].Entry >> 16) % Session->SystemSpaceHashKey;
while (Session->SystemSpaceViewTable[Hash].Entry)
{
/* Loop back at the beginning if we had an overflow */
if (++Hash >= Session->SystemSpaceHashSize) Hash = 0;
}
/* Write the old entry in the new table */
Session->SystemSpaceViewTable[Hash] = OldTable[i];
}
}
/* Free the old table */
ExFreePool(OldTable);
}
}
/* Check if we ran out */
if (Session->SystemSpaceHashEntries == Session->SystemSpaceHashSize)
{
DPRINT1("Ran out of system view hash entries\n");
KeReleaseGuardedMutex(Session->SystemSpaceViewLockPointer);
return NULL;
}
/* Find space where to map this view */
i = RtlFindClearBitsAndSet(Session->SystemSpaceBitMap, Buckets, 0);
ASSERT(i != 0xFFFFFFFF);
if (i == 0xFFFFFFFF)
{
/* Out of space, fail */
Session->BitmapFailures++;
DPRINT1("Out of system view space\n");
KeReleaseGuardedMutex(Session->SystemSpaceViewLockPointer);
return NULL;
}
/* Compute the base address */
Base = (PVOID)((ULONG_PTR)Session->SystemSpaceViewStart + (i * MI_SYSTEM_VIEW_BUCKET_SIZE));
/* Get the hash entry for this allocation */
@ -275,6 +347,7 @@ MiInsertInSystemSpace(IN PMMSESSION Session,
/* Hash entry found, increment total and return the base address */
Session->SystemSpaceHashEntries++;
KeReleaseGuardedMutex(Session->SystemSpaceViewLockPointer);
return Base;
}
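
The expansion logic added to MiInsertInSystemSpace above kicks in when fewer than 8 hash entries remain: it doubles the table, rehashes every live entry against the new hash key (size minus one) and resolves collisions by linear probing with wrap-around. Below is a standalone, user-mode sketch of that grow-and-rehash pattern; the toy ToyView type and the GrowAndRehash name are illustrative, not kernel APIs.

/* Standalone sketch (not ReactOS code): double an open-addressed table and
 * re-insert the surviving entries with linear probing, as the hunk above
 * does for the system space view table. */
#include <stdlib.h>
#include <stdio.h>

typedef struct
{
    unsigned long Entry;   /* 0 means "slot is free", as in the MMVIEW table */
} ToyView;

static ToyView *Table;
static unsigned long Size = 4;

static void GrowAndRehash(void)
{
    unsigned long newSize = Size * 2;
    ToyView *newTable = calloc(newSize, sizeof(ToyView));
    if (!newTable) return;                 /* keep the old table on failure */

    for (unsigned long i = 0; i < Size; i++)
    {
        if (!Table[i].Entry) continue;     /* skip free slots */

        /* Mirror the kernel's hash: high bits modulo (new size - 1) */
        unsigned long hash = (Table[i].Entry >> 16) % (newSize - 1);
        while (newTable[hash].Entry)       /* linear probe, wrap at the end */
        {
            if (++hash >= newSize) hash = 0;
        }
        newTable[hash] = Table[i];
    }

    free(Table);
    Table = newTable;
    Size = newSize;
}

int main(void)
{
    Table = calloc(Size, sizeof(ToyView));
    Table[0].Entry = 0xABCD0000;           /* pretend two views are mapped */
    Table[1].Entry = 0x12340000;
    GrowAndRehash();
    printf("table now has %lu slots\n", Size);
    free(Table);
    return 0;
}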
@ -701,30 +774,25 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
PSEGMENT Segment;
PFN_NUMBER PteOffset;
NTSTATUS Status;
ULONG QuotaCharge = 0, QuotaExcess = 0;
PMMPTE PointerPte, LastPte;
MMPTE TempPte;
/* Get the segment and subsection for this section */
/* Get the segment for this section */
Segment = ControlArea->Segment;
Subsection = (PSUBSECTION)(ControlArea + 1);
/* Non-pagefile-backed sections not supported */
ASSERT(ControlArea->u.Flags.GlobalOnlyPerSession == 0);
ASSERT(ControlArea->u.Flags.Rom == 0);
ASSERT(ControlArea->FilePointer == NULL);
ASSERT(Segment->SegmentFlags.TotalNumberOfPtes4132 == 0);
/* One can only reserve a file-based mapping, not shared memory! */
if ((AllocationType & MEM_RESERVE) && !(ControlArea->FilePointer))
{
return STATUS_INVALID_PARAMETER_9;
}
/* Based sections not supported */
ASSERT(Section->Address.StartingVpn == 0);
/* These flags/parameters are not supported */
/* This flag determines alignment, but ARM3 does not yet support it */
ASSERT((AllocationType & MEM_DOS_LIM) == 0);
ASSERT((AllocationType & MEM_RESERVE) == 0);
ASSERT(Process->VmTopDown == 0);
ASSERT(Section->u.Flags.CopyOnWrite == FALSE);
ASSERT(ZeroBits == 0);
/* First, increase the map count. No purging is supported yet */
Status = MiCheckPurgeAndUpMapCount(ControlArea, FALSE);
ASSERT(NT_SUCCESS(Status));
if (!NT_SUCCESS(Status)) return Status;
/* Check if the caller specified the view size */
if (!(*ViewSize))
@ -742,29 +810,57 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
SectionOffset->LowPart &= ~((ULONG)_64K - 1);
}
/* We must be dealing with a 64KB aligned offset */
/* We must be dealing with a 64KB aligned offset. This is a Windows ASSERT */
ASSERT((SectionOffset->LowPart & ((ULONG)_64K - 1)) == 0);
/* It's illegal to try to map more than 2GB */
/* FIXME: Should dereference the control area */
if (*ViewSize >= 0x80000000) return STATUS_INVALID_VIEW_SIZE;
/* Windows ASSERTs for this flag */
ASSERT(ControlArea->u.Flags.GlobalOnlyPerSession == 0);
/* Get the subsection. We don't support LARGE_CONTROL_AREA in ARM3 */
ASSERT(ControlArea->u.Flags.Rom == 0);
Subsection = (PSUBSECTION)(ControlArea + 1);
/* Sections with extended segments are not supported in ARM3 */
ASSERT(Segment->SegmentFlags.TotalNumberOfPtes4132 == 0);
/* Within this section, figure out which PTEs will describe the view */
PteOffset = (PFN_NUMBER)(SectionOffset->QuadPart >> PAGE_SHIFT);
/* The offset must be in this segment's PTE chunk and it must be valid */
/* The offset must be in this segment's PTE chunk and it must be valid. Windows ASSERTs */
ASSERT(PteOffset < Segment->TotalNumberOfPtes);
ASSERT(((SectionOffset->QuadPart + *ViewSize + PAGE_SIZE - 1) >> PAGE_SHIFT) >= PteOffset);
/* In ARM3, only one subsection is used for now. It must contain these PTEs */
ASSERT(PteOffset < Subsection->PtesInSubsection);
/* In ARM3, only page-file backed sections (shared memory) are supported now */
ASSERT(ControlArea->FilePointer == NULL);
/* Windows ASSERTs for this too -- there must be a subsection base address */
ASSERT(Subsection->SubsectionBase != NULL);
/* In ARM3, only MEM_COMMIT is supported for now. The PTEs must've been committed */
ASSERT(Segment->NumberOfCommittedPages >= Segment->TotalNumberOfPtes);
/* Compute how much commit space the segment will take */
if ((CommitSize) && (Segment->NumberOfCommittedPages < Segment->TotalNumberOfPtes))
{
PointerPte = &Subsection->SubsectionBase[PteOffset];
LastPte = PointerPte + BYTES_TO_PAGES(CommitSize);
QuotaCharge = LastPte - PointerPte;
}
/* ARM3 does not currently support large pages */
ASSERT(Segment->SegmentFlags.LargePages == 0);
/* Did the caller specify an address? */
if (!(*BaseAddress))
if (!(*BaseAddress) && !(Section->Address.StartingVpn))
{
/* ARM3 does not support these flags yet */
ASSERT(Process->VmTopDown == 0);
ASSERT(ZeroBits == 0);
/* Which way should we search? */
if (AllocationType & MEM_TOP_DOWN)
{
@ -787,21 +883,43 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
&StartAddress);
ASSERT(NT_SUCCESS(Status));
}
/* Get the ending address, which is the last piece we need for the VAD */
EndingAddress = (StartAddress + *ViewSize - 1) | (PAGE_SIZE - 1);
}
else
{
/* This (rather easy) code path is not yet implemented */
UNIMPLEMENTED;
while (TRUE);
}
/* Is it SEC_BASED, or did the caller manually specify an address? */
if (!(*BaseAddress))
{
/* It is a SEC_BASED mapping, use the address that was generated */
StartAddress = Section->Address.StartingVpn + SectionOffset->LowPart;
DPRINT("BASED: 0x%p\n", StartAddress);
}
else
{
/* Just align what the caller gave us */
StartAddress = ROUND_UP((ULONG_PTR)*BaseAddress, _64K);
}
/* Get the ending address, which is the last piece we need for the VAD */
EndingAddress = (StartAddress + *ViewSize - 1) | (PAGE_SIZE - 1);
/* Make sure it doesn't conflict with an existing allocation */
if (MiCheckForConflictingNode(StartAddress >> PAGE_SHIFT,
EndingAddress >> PAGE_SHIFT,
&Process->VadRoot))
{
DPRINT1("Conflict with SEC_BASED or manually based section!\n");
return STATUS_CONFLICTING_ADDRESSES; // FIXME: CA Leak
}
}
/* A VAD can now be allocated. Do so and zero it out */
/* FIXME: we are allocating a LONG VAD for ReactOS compatibility only */
ASSERT((AllocationType & MEM_RESERVE) == 0); /* ARM3 does not support this */
Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
ASSERT(Vad);
if (!Vad) return STATUS_INSUFFICIENT_RESOURCES; /* FIXME: CA Leak */
RtlZeroMemory(Vad, sizeof(MMVAD_LONG));
Vad->u4.Banked = (PVOID)0xDEADBABE;
@ -822,10 +940,10 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
/* Finally, write down the first and last prototype PTE */
Vad->FirstPrototypePte = &Subsection->SubsectionBase[PteOffset];
PteOffset += (Vad->EndingVpn - Vad->StartingVpn);
ASSERT(PteOffset < Subsection->PtesInSubsection);
Vad->LastContiguousPte = &Subsection->SubsectionBase[PteOffset];
/* Make sure the last PTE is valid and still within the subsection */
ASSERT(PteOffset < Subsection->PtesInSubsection);
/* Make sure the prototype PTE ranges make sense, this is a Windows ASSERT */
ASSERT(Vad->FirstPrototypePte <= Vad->LastContiguousPte);
/* FIXME: Should setup VAD bitmap */
@ -843,10 +961,48 @@ MiMapViewOfDataSection(IN PCONTROL_AREA ControlArea,
/* Windows stores this for accounting purposes, do so as well */
if (!Segment->u2.FirstMappedVa) Segment->u2.FirstMappedVa = (PVOID)StartAddress;
/* Check if anything was committed */
if (QuotaCharge)
{
/* Set the start and end PTE addresses, and pick the template PTE */
PointerPte = Vad->FirstPrototypePte;
LastPte = PointerPte + BYTES_TO_PAGES(CommitSize);
TempPte = Segment->SegmentPteTemplate;
/* Acquire the commit lock and loop all prototype PTEs to be committed */
KeAcquireGuardedMutexUnsafe(&MmSectionCommitMutex);
while (PointerPte < LastPte)
{
/* Make sure the PTE is already invalid */
if (PointerPte->u.Long == 0)
{
/* And write the invalid PTE */
MI_WRITE_INVALID_PTE(PointerPte, TempPte);
}
else
{
/* The PTE is valid, so skip it */
QuotaExcess++;
}
/* Move to the next PTE */
PointerPte++;
}
/* Now check how many pages exactly we committed, and update accounting */
ASSERT(QuotaCharge >= QuotaExcess);
QuotaCharge -= QuotaExcess;
Segment->NumberOfCommittedPages += QuotaCharge;
ASSERT(Segment->NumberOfCommittedPages <= Segment->TotalNumberOfPtes);
/* Now that we're done, release the lock */
KeReleaseGuardedMutexUnsafe(&MmSectionCommitMutex);
}
/* Finally, let the caller know where, and for what size, the view was mapped */
*ViewSize = (ULONG_PTR)EndingAddress - (ULONG_PTR)StartAddress + 1;
*BaseAddress = (PVOID)StartAddress;
DPRINT1("Start and region: 0x%p, 0x%p\n", *BaseAddress, *ViewSize);
DPRINT("Start and region: 0x%p, 0x%p\n", *BaseAddress, *ViewSize);
return STATUS_SUCCESS;
}
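
The commit path above charges QuotaCharge pages up front, writes the segment's template PTE into every still-zero prototype PTE, counts already-populated PTEs as QuotaExcess, and only then adds the difference to NumberOfCommittedPages. Here is a standalone sketch of that bookkeeping over a toy prototype-PTE array; the PTE format and the commit lock are omitted, and all names are illustrative.

/* Standalone sketch (not ReactOS code): commit a run of prototype PTEs and
 * adjust the charge for entries that were already committed. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define BYTES_TO_PAGES(b) (((b) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
    uint64_t protoPte[16] = {0};      /* the section's prototype PTE chunk     */
    protoPte[2] = 0xFEEDF00Du;        /* pretend one PTE was already committed */

    uint64_t pteTemplate = 0x80u;     /* stand-in for SegmentPteTemplate       */
    size_t commitSize = 5 * PAGE_SIZE;

    size_t quotaCharge = BYTES_TO_PAGES(commitSize);
    size_t quotaExcess = 0;

    /* Walk the PTEs the view wants committed */
    for (size_t i = 0; i < BYTES_TO_PAGES(commitSize); i++)
    {
        if (protoPte[i] == 0)
            protoPte[i] = pteTemplate; /* write the "committed" PTE          */
        else
            quotaExcess++;             /* already committed, don't charge it */
    }

    quotaCharge -= quotaExcess;
    printf("charged %zu page(s), skipped %zu\n", quotaCharge, quotaExcess);
    return 0;
}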
@ -1226,7 +1382,6 @@ MmCreateArm3Section(OUT PVOID *SectionObject,
ASSERT(FileHandle == NULL);
ASSERT(FileObject == NULL);
ASSERT((AllocationAttributes & SEC_LARGE_PAGES) == 0);
ASSERT((AllocationAttributes & SEC_BASED) == 0);
/* Make the same sanity checks that the Nt interface should've validated */
ASSERT((AllocationAttributes & ~(SEC_COMMIT | SEC_RESERVE | SEC_BASED |
@ -1258,10 +1413,10 @@ MmCreateArm3Section(OUT PVOID *SectionObject,
ProtectionMask,
AllocationAttributes);
ASSERT(NT_SUCCESS(Status));
ASSERT(NewSegment != NULL);
/* Set the initial section object data */
Section.InitialPageProtection = SectionPageProtection;
Section.Segment = NULL;
Section.SizeOfSection.QuadPart = NewSegment->SizeOfSegment;
Section.Segment = NewSegment;
@ -1296,6 +1451,49 @@ MmCreateArm3Section(OUT PVOID *SectionObject,
/* Now copy the local section object from the stack into this new object */
RtlCopyMemory(NewSection, &Section, sizeof(SECTION));
NewSection->Address.StartingVpn = 0;
NewSection->u.Flags.UserReference = TRUE;
/* Migrate the attribute into a flag */
if (AllocationAttributes & SEC_NO_CHANGE) NewSection->u.Flags.NoChange = TRUE;
/* If R/W access is not requested, this might eventually become a CoW mapping */
if (!(SectionPageProtection & (PAGE_READWRITE | PAGE_EXECUTE_READWRITE)))
{
NewSection->u.Flags.CopyOnWrite = TRUE;
}
/* Is this a "based" allocation, in which all mappings are identical? */
if (AllocationAttributes & SEC_BASED)
{
/* Convert the flag, and make sure the section isn't too big */
NewSection->u.Flags.Based = TRUE;
if (NewSection->SizeOfSection.QuadPart > (ULONG_PTR)MmHighSectionBase)
{
DPRINT1("BASED section is too large\n");
ObDereferenceObject(NewSection);
return STATUS_NO_MEMORY;
}
/* Lock the VAD tree during the search */
KeAcquireGuardedMutex(&MmSectionBasedMutex);
/* Find an address top-down */
Status = MiFindEmptyAddressRangeDownBasedTree(NewSection->SizeOfSection.LowPart,
(ULONG_PTR)MmHighSectionBase,
_64K,
&MmSectionBasedRoot,
&NewSection->Address.StartingVpn);
ASSERT(NT_SUCCESS(Status));
/* Compute the ending address and insert it into the VAD tree */
NewSection->Address.EndingVpn = NewSection->Address.StartingVpn +
NewSection->SizeOfSection.LowPart -
1;
MiInsertBasedSection(NewSection);
/* Finally release the lock */
KeReleaseGuardedMutex(&MmSectionBasedMutex);
}
/* Return the object and the creation status */
*SectionObject = (PVOID)NewSection;
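
For SEC_BASED sections, the block above reserves one system-wide base below MmHighSectionBase so that every process mapping the section gets the same address. The following standalone sketch covers only the placement arithmetic: the highest 64K-aligned start at which a section of the given size still ends at or below the boundary. The real routine, MiFindEmptyAddressRangeDownBasedTree, also walks the based-section tree and applies its own rounding; the boundary value assumes MmHighestUserAddress is 0x7FFEFFFF as defined in the header.

/* Standalone sketch (not ReactOS code): highest 64K-aligned base for a
 * based section that must end at or below MmHighSectionBase. */
#include <stdio.h>
#include <stdint.h>

#define _64K 0x10000u
#define ROUND_DOWN(x, a) (((x) / (a)) * (a))

int main(void)
{
    /* 0x7FFEFFFF - 0x800000 = 0x777EFFFF, the value MmArmInitSystem computes */
    uint32_t boundary = 0x777EFFFFu;
    uint32_t size     = 0x24000u;      /* a 144KB section, for example */

    /* Highest aligned slot whose last byte is still within the boundary */
    uint32_t base = ROUND_DOWN(boundary + 1 - size, _64K);

    printf("a 0x%X-byte based section would start at 0x%08X\n",
           (unsigned)size, (unsigned)base);
    return 0;
}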
@ -1421,7 +1619,7 @@ MmMapViewOfArm3Section(IN PVOID SectionObject,
if (!Process->VmDeleted)
{
/* Do the actual mapping */
DPRINT1("Mapping ARM3 data section\n");
DPRINT("Mapping ARM3 data section\n");
Status = MiMapViewOfDataSection(ControlArea,
Process,
BaseAddress,
@ -1494,6 +1692,30 @@ MmUnmapViewInSessionSpace(IN PVOID MappedBase)
return STATUS_NOT_IMPLEMENTED;
}
/*
* @implemented
*/
NTSTATUS
NTAPI
MmUnmapViewInSystemSpace(IN PVOID MappedBase)
{
PMEMORY_AREA MemoryArea;
PAGED_CODE();
/* Was this mapped by RosMm? */
MemoryArea = MmLocateMemoryAreaByAddress(MmGetKernelAddressSpace(), MappedBase);
if ((MemoryArea) && (MemoryArea->Type != MEMORY_AREA_OWNED_BY_ARM3))
{
return MiRosUnmapViewInSystemSpace(MappedBase);
}
/* It was not, call the ARM3 routine */
ASSERT(FALSE);
return STATUS_SUCCESS;
// DPRINT("ARM3 unmapping\n");
// return MiUnmapViewInSystemSpace(&MmSession, MappedBase);
}
/* SYSTEM CALLS ***************************************************************/
NTSTATUS

View File

@ -161,6 +161,21 @@ MiInsertVad(IN PMMVAD Vad,
MiInsertNode(&Process->VadRoot, (PVOID)Vad, Parent, Result);
}
VOID
NTAPI
MiInsertBasedSection(IN PSECTION Section)
{
TABLE_SEARCH_RESULT Result;
PMMADDRESS_NODE Parent = NULL;
ASSERT(Section->Address.EndingVpn >= Section->Address.StartingVpn);
/* Find the parent VAD and where this child should be inserted */
Result = RtlpFindAvlTableNodeOrParent(&MmSectionBasedRoot, (PVOID)Section->Address.StartingVpn, &Parent);
ASSERT(Result != TableFoundNode);
ASSERT((Parent != NULL) || (Result == TableEmptyTree));
MiInsertNode(&MmSectionBasedRoot, &Section->Address, Parent, Result);
}
VOID
NTAPI
MiRemoveNode(IN PMMADDRESS_NODE Node,
@ -471,4 +486,101 @@ MiFindEmptyAddressRangeDownTree(IN SIZE_T Length,
return TableFoundNode;
}
NTSTATUS
NTAPI
MiFindEmptyAddressRangeDownBasedTree(IN SIZE_T Length,
IN ULONG_PTR BoundaryAddress,
IN ULONG_PTR Alignment,
IN PMM_AVL_TABLE Table,
OUT PULONG_PTR Base)
{
PMMADDRESS_NODE Node, LowestNode;
ULONG_PTR LowVpn, BestVpn;
/* Sanity checks */
ASSERT(Table == &MmSectionBasedRoot);
ASSERT(BoundaryAddress);
ASSERT(BoundaryAddress <= ((ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1));
/* Compute page length, make sure the boundary address is valid */
Length = ROUND_TO_PAGES(Length);
if ((BoundaryAddress + 1) < Length) return STATUS_NO_MEMORY;
/* Check if the table is empty */
BestVpn = ROUND_UP(BoundaryAddress + 1 - Length, Alignment);
if (Table->NumberGenericTableElements == 0)
{
/* Tree is empty, the candidate address is already the best one */
*Base = BestVpn;
return STATUS_SUCCESS;
}
/* Go to the right-most node which should be the biggest address */
Node = Table->BalancedRoot.RightChild;
while (RtlRightChildAvl(Node)) Node = RtlRightChildAvl(Node);
/* Check if we can fit in here */
LowVpn = ROUND_UP(Node->EndingVpn, Alignment);
if ((LowVpn < BoundaryAddress) && (Length < (BoundaryAddress - LowVpn)))
{
/* Return the address */
*Base = ROUND_UP(BoundaryAddress - Length, Alignment);
return STATUS_SUCCESS;
}
/* Now loop the Vad nodes */
do
{
/* Break out if we've reached the last node */
LowestNode = MiGetPreviousNode(Node);
if (!LowestNode) break;
/* Check if this node could contain the requested address */
LowVpn = ROUND_UP(LowestNode->EndingVpn + 1, Alignment);
if ((LowestNode->EndingVpn < BestVpn) &&
(Length <= (Node->StartingVpn - LowVpn)))
{
/* Check if it fits in perfectly */
if ((BestVpn > LowestNode->EndingVpn) &&
(BoundaryAddress < Node->StartingVpn))
{
/* Return the optimal VPN address */
*Base = BestVpn;
return STATUS_SUCCESS;
}
/* It doesn't, check if it can partly fit */
if (Node->StartingVpn > LowVpn)
{
/* Return an aligned base address within this node */
*Base = ROUND_UP(Node->StartingVpn - Length, Alignment);
return STATUS_SUCCESS;
}
}
/* Move to the next node */
Node = LowestNode;
} while (TRUE);
/* Check if there's enough space before the lowest Vad */
if ((Node->StartingVpn > (ULONG_PTR)MI_LOWEST_VAD_ADDRESS) &&
((Node->StartingVpn - (ULONG_PTR)MI_LOWEST_VAD_ADDRESS) > Length))
{
/* Check if it fits in perfectly */
if (BoundaryAddress < Node->StartingVpn)
{
/* Return the optimal VPN address */
*Base = BestVpn;
return STATUS_SUCCESS;
}
/* Return an aligned base address within this node */
*Base = ROUND_UP(Node->StartingVpn - Length, Alignment);
return STATUS_SUCCESS;
}
/* No address space left at all */
return STATUS_NO_MEMORY;
}
/* EOF */
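
The search loop above accepts a hole between two neighboring nodes only when the aligned address just past the lower node's end leaves at least Length bytes before the higher node begins. Below is a standalone sketch of that gap test with made-up neighbor addresses; FitsBetween is an illustrative name, not a kernel routine.

/* Standalone sketch (not ReactOS code): does an aligned range of `length`
 * bytes fit between the end of one node and the start of the next? */
#include <stdio.h>
#include <stdint.h>

#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

static int FitsBetween(uint32_t lowerEnd, uint32_t higherStart,
                       uint32_t length, uint32_t alignment)
{
    /* First usable, aligned address after the lower neighbor */
    uint32_t lowVpn = ROUND_UP(lowerEnd + 1, alignment);

    /* The hole must start below the higher neighbor and be large enough */
    return (lowVpn < higherStart) && (length <= higherStart - lowVpn);
}

int main(void)
{
    /* Two hypothetical based mappings already in the tree */
    printf("fits: %d\n", FitsBetween(0x20FFFF, 0x400000, 0x10000, 0x10000));
    printf("fits: %d\n", FitsBetween(0x3FFFFF, 0x400000, 0x10000, 0x10000));
    return 0;
}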

View File

@ -2855,6 +2855,10 @@ MmInitSectionImplementation(VOID)
DPRINT("Creating Section Object Type\n");
/* Initialize the section based root */
ASSERT(MmSectionBasedRoot.NumberGenericTableElements == 0);
MmSectionBasedRoot.BalancedRoot.u1.Parent = &MmSectionBasedRoot.BalancedRoot;
/* Initialize the Section object type */
RtlZeroMemory(&ObjectTypeInitializer, sizeof(ObjectTypeInitializer));
RtlInitUnicodeString(&Name, L"Section");
@ -4231,6 +4235,7 @@ MmUnmapViewOfSection(PEPROCESS Process,
MemoryArea->Type != MEMORY_AREA_SECTION_VIEW ||
MemoryArea->DeleteInProgress)
{
ASSERT(MemoryArea->Type != MEMORY_AREA_OWNED_BY_ARM3);
MmUnlockAddressSpace(AddressSpace);
return STATUS_NOT_MAPPED_VIEW;
}
@ -4538,7 +4543,7 @@ MmMapViewOfSection(IN PVOID SectionObject,
if (MiIsRosSectionObject(SectionObject) == FALSE)
{
DPRINT1("Mapping ARM3 section into %s\n", Process->ImageFileName);
DPRINT("Mapping ARM3 section into %s\n", Process->ImageFileName);
return MmMapViewOfArm3Section(SectionObject,
Process,
BaseAddress,
@ -4875,7 +4880,6 @@ MmMapViewInSystemSpace (IN PVOID SectionObject,
if (MiIsRosSectionObject(SectionObject) == FALSE)
{
DPRINT1("ARM3 System Mapping\n");
return MiMapViewInSystemSpace(SectionObject,
&MmSession,
MappedBase,
@ -4917,11 +4921,9 @@ MmMapViewInSystemSpace (IN PVOID SectionObject,
return Status;
}
/*
* @implemented
*/
NTSTATUS NTAPI
MmUnmapViewInSystemSpace (IN PVOID MappedBase)
NTSTATUS
NTAPI
MiRosUnmapViewInSystemSpace(IN PVOID MappedBase)
{
PMMSUPPORT AddressSpace;
NTSTATUS Status;
@ -4939,7 +4941,6 @@ MmUnmapViewInSystemSpace (IN PVOID MappedBase)
return Status;
}
/**********************************************************************
* NAME EXPORTED
* MmCreateSection@