scsi: lpfc: Replace PCI pool old API
The PCI pool API is deprecated. This commit replaces the old PCI pool API with the corresponding DMA pool API calls and updates the related comments accordingly.

Signed-off-by: Romain Perier <romain.perier@collabora.com>
Reviewed-by: Peter Senna Tschudin <peter.senna@collabora.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
parent decab9a6fb
commit 771db5c0e3
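The conversion in the diff below is mechanical: pci_pool_create(), pci_pool_alloc(), pci_pool_zalloc(), pci_pool_free() and pci_pool_destroy() become their dma_pool_* counterparts, and the struct pci_dev argument is replaced by its embedded generic device (&pdev->dev). A minimal before/after sketch of that pattern, using illustrative names rather than actual lpfc code:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/pci.h>

static struct dma_pool *example_pool;	/* was: struct pci_pool *example_pool; */

static int example_pool_setup(struct pci_dev *pdev)
{
	/* was: example_pool = pci_pool_create("example_pool", pdev, 512, 16, 0); */
	example_pool = dma_pool_create("example_pool", &pdev->dev, 512, 16, 0);
	if (!example_pool)
		return -ENOMEM;
	return 0;
}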
@@ -946,14 +946,14 @@ struct lpfc_hba {
 	struct list_head active_rrq_list;
 	spinlock_t hbalock;
 
-	/* pci_mem_pools */
-	struct pci_pool *lpfc_sg_dma_buf_pool;
-	struct pci_pool *lpfc_mbuf_pool;
-	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
-	struct pci_pool *lpfc_drb_pool;	/* data receive buffer pool */
-	struct pci_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
-	struct pci_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
-	struct pci_pool *txrdy_payload_pool;
+	/* dma_mem_pools */
+	struct dma_pool *lpfc_sg_dma_buf_pool;
+	struct dma_pool *lpfc_mbuf_pool;
+	struct dma_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct dma_pool *lpfc_drb_pool;	/* data receive buffer pool */
+	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
+	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
+	struct dma_pool *txrdy_payload_pool;
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
@@ -3275,7 +3275,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
 				 list) {
 		list_del(&sb->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
 			      sb->dma_handle);
 		kfree(sb);
 		phba->total_scsi_bufs--;
@@ -3286,7 +3286,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
 				 list) {
 		list_del(&sb->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
 			      sb->dma_handle);
 		kfree(sb);
 		phba->total_scsi_bufs--;
@@ -3317,7 +3317,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_put, list) {
 		list_del(&lpfc_ncmd->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
 		phba->total_nvme_bufs--;
@@ -3328,7 +3328,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_get, list) {
 		list_del(&lpfc_ncmd->list);
-		pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
 		phba->total_nvme_bufs--;
@@ -3640,7 +3640,7 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
 			list_remove_head(&scsi_sgl_list, psb,
 					 struct lpfc_scsi_buf, list);
 			if (psb) {
-				pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 					      psb->data, psb->dma_handle);
 				kfree(psb);
 			}
@@ -3774,7 +3774,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
 			list_remove_head(&nvme_sgl_list, lpfc_ncmd,
 					 struct lpfc_nvme_buf, list);
 			if (lpfc_ncmd) {
-				pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 					      lpfc_ncmd->data,
 					      lpfc_ncmd->dma_handle);
 				kfree(lpfc_ncmd);
@@ -6846,8 +6846,8 @@ lpfc_create_shost(struct lpfc_hba *phba)
 	if (phba->nvmet_support) {
 		/* Only 1 vport (pport) will support NVME target */
 		if (phba->txrdy_payload_pool == NULL) {
-			phba->txrdy_payload_pool = pci_pool_create(
-				"txrdy_pool", phba->pcidev,
+			phba->txrdy_payload_pool = dma_pool_create(
+				"txrdy_pool", &phba->pcidev->dev,
 				TXRDY_PAYLOAD_LEN, 16, 0);
 			if (phba->txrdy_payload_pool) {
 				phba->targetport = NULL;
@@ -97,8 +97,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 			i = SLI4_PAGE_SIZE;
 
 		phba->lpfc_sg_dma_buf_pool =
-			pci_pool_create("lpfc_sg_dma_buf_pool",
-					phba->pcidev,
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev,
 					phba->cfg_sg_dma_buf_size,
 					i, 0);
 		if (!phba->lpfc_sg_dma_buf_pool)
@@ -106,15 +106,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 
 	} else {
 		phba->lpfc_sg_dma_buf_pool =
-			pci_pool_create("lpfc_sg_dma_buf_pool",
-					phba->pcidev, phba->cfg_sg_dma_buf_size,
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
 					align, 0);
 
 		if (!phba->lpfc_sg_dma_buf_pool)
 			goto fail;
 	}
 
-	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
 					       LPFC_BPL_SIZE,
 					       align, 0);
 	if (!phba->lpfc_mbuf_pool)
@@ -128,7 +128,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 	pool->max_count = 0;
 	pool->current_count = 0;
 	for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
-		pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
 				       GFP_KERNEL, &pool->elements[i].phys);
 		if (!pool->elements[i].virt)
 			goto fail_free_mbuf_pool;
@@ -152,21 +152,21 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 					    sizeof(struct lpfc_node_rrq));
 		if (!phba->rrq_pool)
 			goto fail_free_nlp_mem_pool;
-		phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
-					      phba->pcidev,
+		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
+					      &phba->pcidev->dev,
 					      LPFC_HDR_BUF_SIZE, align, 0);
 		if (!phba->lpfc_hrb_pool)
 			goto fail_free_rrq_mem_pool;
 
-		phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
-					      phba->pcidev,
+		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
+					      &phba->pcidev->dev,
 					      LPFC_DATA_BUF_SIZE, align, 0);
 		if (!phba->lpfc_drb_pool)
 			goto fail_free_hrb_pool;
 		phba->lpfc_hbq_pool = NULL;
 	} else {
-		phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
-			phba->pcidev, LPFC_BPL_SIZE, align, 0);
+		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
+			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
 		if (!phba->lpfc_hbq_pool)
 			goto fail_free_nlp_mem_pool;
 		phba->lpfc_hrb_pool = NULL;
@@ -185,10 +185,10 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 
 	return 0;
 fail_free_drb_pool:
-	pci_pool_destroy(phba->lpfc_drb_pool);
+	dma_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
-	pci_pool_destroy(phba->lpfc_hrb_pool);
+	dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
 	mempool_destroy(phba->rrq_pool);
@@ -201,14 +201,14 @@ fail_free_drb_pool:
 	phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
 	while (i--)
-		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
 						 pool->elements[i].phys);
 	kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
-	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
 fail_free_dma_buf_pool:
-	pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
 	phba->lpfc_sg_dma_buf_pool = NULL;
 fail:
	return -ENOMEM;
@@ -218,8 +218,8 @@ int
 lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
 {
 	phba->lpfc_nvmet_drb_pool =
-		pci_pool_create("lpfc_nvmet_drb_pool",
-				phba->pcidev, LPFC_NVMET_DATA_BUF_SIZE,
+		dma_pool_create("lpfc_nvmet_drb_pool",
+				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
 				SGL_ALIGN_SZ, 0);
 	if (!phba->lpfc_nvmet_drb_pool) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -248,20 +248,20 @@ lpfc_mem_free(struct lpfc_hba *phba)
 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
 	if (phba->lpfc_nvmet_drb_pool)
-		pci_pool_destroy(phba->lpfc_nvmet_drb_pool);
+		dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
 	phba->lpfc_nvmet_drb_pool = NULL;
 	if (phba->lpfc_drb_pool)
-		pci_pool_destroy(phba->lpfc_drb_pool);
+		dma_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
 	if (phba->lpfc_hrb_pool)
-		pci_pool_destroy(phba->lpfc_hrb_pool);
+		dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
 	if (phba->txrdy_payload_pool)
-		pci_pool_destroy(phba->txrdy_payload_pool);
+		dma_pool_destroy(phba->txrdy_payload_pool);
 	phba->txrdy_payload_pool = NULL;
 
 	if (phba->lpfc_hbq_pool)
-		pci_pool_destroy(phba->lpfc_hbq_pool);
+		dma_pool_destroy(phba->lpfc_hbq_pool);
 	phba->lpfc_hbq_pool = NULL;
 
 	if (phba->rrq_pool)
@@ -282,15 +282,15 @@ lpfc_mem_free(struct lpfc_hba *phba)
 
 	/* Free MBUF memory pool */
 	for (i = 0; i < pool->current_count; i++)
-		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
 			      pool->elements[i].phys);
 	kfree(pool->elements);
 
-	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
 
 	/* Free DMA buffer memory pool */
-	pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
 	phba->lpfc_sg_dma_buf_pool = NULL;
 
 	/* Free Device Data memory pool */
@@ -379,7 +379,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
  * @handle: used to return the DMA-mapped address of the mbuf
  *
  * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
- * Allocates from generic pci_pool_alloc function first and if that fails and
+ * Allocates from generic dma_pool_alloc function first and if that fails and
  * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
  * HBA's pool.
  *
@@ -397,7 +397,7 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 	unsigned long iflags;
 	void *ret;
 
-	ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
@@ -433,7 +433,7 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
 		pool->elements[pool->current_count].phys = dma;
 		pool->current_count++;
 	} else {
-		pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
 	}
 	return;
 }
@@ -470,7 +470,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
 * @handle: used to return the DMA-mapped address of the nvmet_buf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
-* PCI pool. Allocates from generic pci_pool_alloc function.
+* PCI pool. Allocates from generic dma_pool_alloc function.
 *
 * Returns:
 *   pointer to the allocated nvmet_buf on success
@@ -481,7 +481,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 {
 	void *ret;
 
-	ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
+	ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
 	return ret;
 }
 
@@ -497,7 +497,7 @@ lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
 void
 lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
 {
-	pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
+	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
 }
 
 /**
@@ -522,7 +522,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 	if (!hbqbp)
 		return NULL;
 
-	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
 					  &hbqbp->dbuf.phys);
 	if (!hbqbp->dbuf.virt) {
 		kfree(hbqbp);
@@ -547,7 +547,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 void
 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
 {
-	pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
 	kfree(hbqbp);
 	return;
 }
@@ -574,16 +574,16 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 	if (!dma_buf)
 		return NULL;
 
-	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
 					    &dma_buf->hbuf.phys);
 	if (!dma_buf->hbuf.virt) {
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
 					    &dma_buf->dbuf.phys);
 	if (!dma_buf->dbuf.virt) {
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
 			      dma_buf->hbuf.phys);
 		kfree(dma_buf);
 		return NULL;
@@ -607,8 +607,8 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
 {
-	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
 	kfree(dmab);
 }
 
@@ -634,16 +634,16 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 	if (!dma_buf)
 		return NULL;
 
-	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
 					    &dma_buf->hbuf.phys);
 	if (!dma_buf->hbuf.virt) {
 		kfree(dma_buf);
 		return NULL;
 	}
-	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_nvmet_drb_pool,
+	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
 					    GFP_KERNEL, &dma_buf->dbuf.phys);
 	if (!dma_buf->dbuf.virt) {
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
 			      dma_buf->hbuf.phys);
 		kfree(dma_buf);
 		return NULL;
@@ -667,8 +667,8 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
-	pci_pool_free(phba->lpfc_nvmet_drb_pool,
+	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	dma_pool_free(phba->lpfc_nvmet_drb_pool,
 		      dmab->dbuf.virt, dmab->dbuf.phys);
 	kfree(dmab);
 }
@@ -1939,7 +1939,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 		 * pci bus space for an I/O. The DMA buffer includes the
 		 * number of SGE's necessary to support the sg_tablesize.
 		 */
-		lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+		lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
 						 GFP_KERNEL,
 						 &lpfc_ncmd->dma_handle);
 		if (!lpfc_ncmd->data) {
@@ -1950,7 +1950,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
 			kfree(lpfc_ncmd);
 			break;
@@ -1961,7 +1961,7 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, pwqeq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
 			kfree(lpfc_ncmd);
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -175,7 +175,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	unsigned long iflag;
 
 	if (ctxp->txrdy) {
-		pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
 			      ctxp->txrdy_phys);
 		ctxp->txrdy = NULL;
 		ctxp->txrdy_phys = 0;
@@ -1909,7 +1909,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 
 	case NVMET_FCOP_WRITEDATA:
 		/* Words 0 - 2 : The first sg segment */
-		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
+		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
 				       GFP_KERNEL, &physaddr);
 		if (!txrdy) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -416,7 +416,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
 		 * necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 					GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
@@ -427,7 +427,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -826,7 +826,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 * for the struct fcp_cmnd, struct fcp_rsp and the number
 		 * of bde's necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 						GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
@@ -839,7 +839,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 */
 		if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
 		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -848,7 +848,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -857,7 +857,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
@@ -17051,7 +17051,7 @@ lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_dmabuf *pcmd = cmdiocb->context2;
 
 	if (pcmd && pcmd->virt)
-		pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_sli_release_iocbq(phba, cmdiocb);
 }
@@ -17079,7 +17079,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
 	/* Allocate buffer for command payload */
 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 	if (pcmd)
-		pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
 					    &pcmd->phys);
 	if (!pcmd || !pcmd->virt)
 		goto exit;
@@ -17128,7 +17128,7 @@ exit:
 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"2023 Unable to process MDS loopback frame\n");
 	if (pcmd && pcmd->virt)
-		pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_sli_release_iocbq(phba, iocbq);
 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
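For reference, a minimal, self-contained sketch of the DMA pool lifecycle the converted code now relies on; the demo_* names are illustrative and not part of lpfc:

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int demo_dma_pool_roundtrip(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* Pool of 1024-byte buffers, 64-byte aligned, no boundary restriction. */
	pool = dma_pool_create("demo_pool", dev, 1024, 64, 0);
	if (!pool)
		return -ENOMEM;

	/* dma_pool_zalloc() is the zeroing variant seen above for psb->data. */
	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the hardware, access the buffer through 'vaddr' ... */

	dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);
	return 0;
}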