powerpc/powernv/ioda1: Introduce PNV_IODA1_DMA32_SEGSIZE

Currently, one macro (TCE32_TABLE_SIZE) represents the TCE table
size for one DMA32 segment, while the constant for the DMA32
segment size itself (1 << 28) is still hard-coded in the code.

This defines PNV_IODA1_DMA32_SEGSIZE to represent the DMA32 segment
size. As the TCE page size is fixed at 4KB, the TCE table size can
be calculated from the segment size, so all the related calculations
now depend on a single macro (PNV_IODA1_DMA32_SEGSIZE). No logical
changes introduced.
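
As a sanity check on the arithmetic, the shift used in
pnv_pci_ioda1_setup_dma_pe() can be reproduced in a standalone
userspace sketch (illustrative only, not part of the patch; the
main()/printf() scaffolding is added purely for demonstration):

#include <stdio.h>

/* Values mirrored from the patch for illustration */
#define PNV_IODA1_DMA32_SEGSIZE	0x10000000	/* 256MB DMA32 segment */
#define IOMMU_PAGE_SHIFT_4K	12		/* 4KB TCE pages */

int main(void)
{
	/* Each TCE entry is 8 bytes (1 << 3), so one segment needs
	 * (segsize / page_size) * 8 bytes of TCE table, i.e. a
	 * single shift by (IOMMU_PAGE_SHIFT_4K - 3):
	 */
	unsigned int tce32_segsz =
		PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);

	/* Prints 0x80000 (512KB), matching the old TCE32_TABLE_SIZE
	 * of ((0x10000000 / 0x1000) * 8).
	 */
	printf("TCE table size per segment: 0x%x bytes\n", tce32_segsz);
	return 0;
}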

Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Reviewed-by: Alistair Popple <alistair@popple.id.au>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Gavin Shan, 2016-05-03 15:41:33 +10:00 (committed by Michael Ellerman)
parent b30d936f6f
commit acce971c0e


@@ -50,9 +50,7 @@
 #define PNV_IODA1_M64_NUM	16	/* Number of M64 BARs	*/
 #define PNV_IODA1_M64_SEGS	8	/* Segments per M64 BAR	*/
-
-/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
-#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)
+#define PNV_IODA1_DMA32_SEGSIZE	0x10000000
 
 #define POWERNV_IOMMU_DEFAULT_LEVELS	1
 #define POWERNV_IOMMU_MAX_LEVELS	5
@@ -2037,7 +2035,7 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
 	struct page *tce_mem = NULL;
 	struct iommu_table *tbl;
-	unsigned int i;
+	unsigned int tce32_segsz, i;
 	int64_t rc;
 	void *addr;
@@ -2057,29 +2055,34 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
 	/* Grab a 32-bit TCE table */
 	pe->tce32_seg = base;
 	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
-		(base << 28), ((base + segs) << 28) - 1);
+		base * PNV_IODA1_DMA32_SEGSIZE,
+		(base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);
 
 	/* XXX Currently, we allocate one big contiguous table for the
 	 * TCEs. We only really need one chunk per 256M of TCE space
 	 * (ie per segment) but that's an optimization for later, it
 	 * requires some added smarts with our get/put_tce implementation
+	 *
+	 * Each TCE page is 4KB in size and each TCE entry occupies 8
+	 * bytes
 	 */
+	tce32_segsz = PNV_IODA1_DMA32_SEGSIZE >> (IOMMU_PAGE_SHIFT_4K - 3);
 	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
-			get_order(TCE32_TABLE_SIZE * segs));
+			get_order(tce32_segsz * segs));
 	if (!tce_mem) {
 		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
 		goto fail;
 	}
 	addr = page_address(tce_mem);
-	memset(addr, 0, TCE32_TABLE_SIZE * segs);
+	memset(addr, 0, tce32_segsz * segs);
 
 	/* Configure HW */
 	for (i = 0; i < segs; i++) {
 		rc = opal_pci_map_pe_dma_window(phb->opal_id,
 						pe->pe_number,
 						base + i, 1,
-						__pa(addr) + TCE32_TABLE_SIZE * i,
-						TCE32_TABLE_SIZE, 0x1000);
+						__pa(addr) + tce32_segsz * i,
+						tce32_segsz, IOMMU_PAGE_SIZE_4K);
 		if (rc) {
 			pe_err(pe, " Failed to configure 32-bit TCE table,"
 			       " err %ld\n", rc);
@@ -2088,8 +2091,9 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
 	}
 
 	/* Setup linux iommu table */
-	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
-				  base << 28, IOMMU_PAGE_SHIFT_4K);
+	pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
+				  base * PNV_IODA1_DMA32_SEGSIZE,
+				  IOMMU_PAGE_SHIFT_4K);
 
 	/* OPAL variant of P7IOC SW invalidated TCEs */
 	if (phb->ioda.tce_inval_reg)
@@ -2119,7 +2123,7 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
 	if (pe->tce32_seg >= 0)
 		pe->tce32_seg = -1;
 	if (tce_mem)
-		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
+		__free_pages(tce_mem, get_order(tce32_segsz * segs));
 	if (tbl) {
 		pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
 		iommu_free_table(tbl, "pnv");
@@ -3456,7 +3460,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	mutex_init(&phb->ioda.pe_list_mutex);
 
 	/* Calculate how many 32-bit TCE segments we have */
-	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
+	phb->ioda.tce32_count = phb->ioda.m32_pci_base /
+				PNV_IODA1_DMA32_SEGSIZE;
 
 #if 0 /* We should really do that ... */
 	rc = opal_pci_set_phb_mem_window(opal->phb_id,