intel_iommu: refine SL-PEs reserved fields checking
1. Split the reserved fields array into two arrays.
2. Large pages only apply to L2 (2M) and L3 (1G), so remove the large-page reserved-field checks for L1 and L4.

Signed-off-by: Zhang, Qi <qi1.zhang@intel.com>
Signed-off-by: Qi, Yadong <yadong.qi@intel.com>
Message-Id: <20191125003321.5669-2-yadong.qi@intel.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 509ec36c1e
commit ce586f3b8d
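In other words, only the PD (2M) and PDP (1G) levels can carry the page-size bit, so only those two levels need a large-page mask. A minimal sketch of that condition (not QEMU code; the level numbers and the PS bit position follow the second-level paging format this device model uses):

    #include <stdbool.h>
    #include <stdint.h>

    #define SL_PS_BIT    (1ULL << 7)  /* page-size bit of a second-level entry */
    #define SL_PD_LEVEL  2            /* a PD entry with PS set maps a 2M page */
    #define SL_PDP_LEVEL 3            /* a PDP entry with PS set maps a 1G page */

    /* PT (level 1) and PML4 (level 4) entries can never map a large page, so only
     * levels 2 and 3 need a dedicated large-page reserved-bits mask. */
    bool sl_entry_is_large_page(uint64_t slpte, int level)
    {
        return (level == SL_PD_LEVEL || level == SL_PDP_LEVEL) &&
               (slpte & SL_PS_BIT) != 0;
    }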
@@ -910,19 +910,23 @@ static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s,
 
 /*
  * Rsvd field masks for spte:
- *     Index [1] to [4] 4k pages
- *     Index [5] to [8] large pages
+ *     vtd_spte_rsvd 4k pages
+ *     vtd_spte_rsvd_large large pages
  */
-static uint64_t vtd_paging_entry_rsvd_field[9];
+static uint64_t vtd_spte_rsvd[5];
+static uint64_t vtd_spte_rsvd_large[5];
 
 static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
 {
-    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
-        /* Maybe large page */
-        return slpte & vtd_paging_entry_rsvd_field[level + 4];
-    } else {
-        return slpte & vtd_paging_entry_rsvd_field[level];
+    uint64_t rsvd_mask = vtd_spte_rsvd[level];
+
+    if ((level == VTD_SL_PD_LEVEL || level == VTD_SL_PDP_LEVEL) &&
+        (slpte & VTD_SL_PT_PAGE_SIZE_MASK)) {
+        /* large page */
+        rsvd_mask = vtd_spte_rsvd_large[level];
     }
+
+    return slpte & rsvd_mask;
 }
 
 /* Find the VTD address space associated with a given bus number */
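A standalone usage sketch of the reworked check, with made-up mask values rather than the ones vtd_init() derives from s->aw_bits:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PS_BIT     (1ULL << 7)   /* page-size bit, like VTD_SL_PT_PAGE_SIZE_MASK */
    #define PD_LEVEL   2             /* 2M-capable level */
    #define PDP_LEVEL  3             /* 1G-capable level */

    static uint64_t rsvd[5]       = { ~0ULL };  /* per-level 4k masks (placeholder values) */
    static uint64_t rsvd_large[5];              /* large-page masks, used at levels 2 and 3 only */

    static bool nonzero_rsvd(uint64_t slpte, int level)
    {
        uint64_t mask = rsvd[level];

        if ((level == PD_LEVEL || level == PDP_LEVEL) && (slpte & PS_BIT)) {
            mask = rsvd_large[level];           /* a 2M/1G mapping uses the large-page mask */
        }
        return slpte & mask;
    }

    int main(void)
    {
        /* made-up mask: pretend bits 12..20 are reserved in a 2M (level-2) mapping */
        rsvd_large[PD_LEVEL] = 0x1ff000ULL;

        uint64_t bad_pde  = PS_BIT | (1ULL << 13);  /* large page with a reserved bit set */
        uint64_t good_pde = PS_BIT | (1ULL << 21);  /* large page, 2M-aligned address */

        printf("bad:  %s\n", nonzero_rsvd(bad_pde,  PD_LEVEL) ? "violation" : "ok");
        printf("good: %s\n", nonzero_rsvd(good_pde, PD_LEVEL) ? "violation" : "ok");
        return 0;
    }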
@@ -3549,15 +3553,14 @@ static void vtd_init(IntelIOMMUState *s)
     /*
      * Rsvd field masks for spte
      */
-    vtd_paging_entry_rsvd_field[0] = ~0ULL;
-    vtd_paging_entry_rsvd_field[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits);
-    vtd_paging_entry_rsvd_field[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
-    vtd_paging_entry_rsvd_field[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
-    vtd_paging_entry_rsvd_field[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits);
-    vtd_paging_entry_rsvd_field[5] = VTD_SPTE_LPAGE_L1_RSVD_MASK(s->aw_bits);
-    vtd_paging_entry_rsvd_field[6] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits);
-    vtd_paging_entry_rsvd_field[7] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits);
-    vtd_paging_entry_rsvd_field[8] = VTD_SPTE_LPAGE_L4_RSVD_MASK(s->aw_bits);
+    vtd_spte_rsvd[0] = ~0ULL;
+    vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits);
+    vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
+    vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
+    vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits);
+
+    vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits);
+    vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits);
 
     if (x86_iommu_ir_supported(x86_iommu)) {
         s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
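For a feel of what these initializers evaluate to, a hypothetical reconstruction: HAW_MASK and IGN_COM below are stand-ins, since the real VTD_HAW_MASK and VTD_SL_IGN_COM definitions live in intel_iommu_internal.h and are not part of this diff. The shape is the same as the masks above: fixed low reserved bits, plus every bit that is neither within the supported address width nor ignored.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins only, not the QEMU macros: */
    #define HAW_MASK(aw)  ((1ULL << (aw)) - 1)   /* hypothetical: bits covered by the address width */
    #define IGN_COM       0xbff0000000000000ULL  /* hypothetical: ignored/software-available bits */

    /* Reserved-bits mask for a 2M large-page entry, built the same way as above. */
    #define LPAGE_L2_RSVD(aw)  (0x1ff800ULL | ~(HAW_MASK(aw) | IGN_COM))

    int main(void)
    {
        /* s->aw_bits is typically 39 or 48 on this device; show both. */
        printf("aw=39: 0x%016llx\n", (unsigned long long)LPAGE_L2_RSVD(39));
        printf("aw=48: 0x%016llx\n", (unsigned long long)LPAGE_L2_RSVD(48));
        return 0;
    }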
@@ -395,14 +395,11 @@ typedef union VTDInvDesc VTDInvDesc;
         (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
 #define VTD_SPTE_PAGE_L4_RSVD_MASK(aw) \
         (0x880ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
-#define VTD_SPTE_LPAGE_L1_RSVD_MASK(aw) \
-        (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
+
 #define VTD_SPTE_LPAGE_L2_RSVD_MASK(aw) \
         (0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
 #define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw) \
         (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
-#define VTD_SPTE_LPAGE_L4_RSVD_MASK(aw) \
-        (0x880ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM))
 
 /* Information about page-selective IOTLB invalidate */
 struct VTDIOTLBPageInvInfo {
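Decoding the constants above: 0x800 is bit 11, 0x880 is bits 7 and 11 (bit 7 is the page-size bit, reserved at L4 because a PML4 entry cannot map a page), 0x1ff800 is bits 11-20 (bit 11 plus the low address bits a 2M mapping leaves unused), and 0x3ffff800 is bits 11-29 (likewise for 1G). A throwaway check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Print which bit positions are set in each reserved-bits constant. */
    static void dump_bits(const char *name, uint64_t mask)
    {
        printf("%-10s:", name);
        for (int bit = 0; bit < 64; bit++) {
            if (mask & (1ULL << bit)) {
                printf(" %d", bit);
            }
        }
        printf("\n");
    }

    int main(void)
    {
        dump_bits("0x800",      0x800ULL);      /* bit 11 */
        dump_bits("0x880",      0x880ULL);      /* bits 7, 11 */
        dump_bits("0x1ff800",   0x1ff800ULL);   /* bits 11..20 (2M) */
        dump_bits("0x3ffff800", 0x3ffff800ULL); /* bits 11..29 (1G) */
        return 0;
    }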