hw/nvme updates
Merge tag 'nvme-next-pull-request' of https://gitlab.com/birkelund/qemu into staging

hw/nvme updates

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEUigzqnXi3OaiR2bATeGvMW1PDekFAmO+anUACgkQTeGvMW1P
# Del4HAf/eqW92bf04CpXPzLMG38+FoYQgwkH8Lp390JlSM6kjv3DgPM2ygJXIEYr
# dV0KQpTcPRGAVCuXCSOUd6FTkAYdcPVtOeJeYhldEYj9cc9Y/9tvcBMthFBkiCU/
# 66TL7JEgFmOfVMrLypYpfrPHCN0lQVX+i2+Qruospg6itubAKcYdJq+Hx2WWHzcC
# hl/qNgSSIGlhQi21ada1iCUC8Q3TZzo54M+0tW4zfybzsG+eNM+ouKDl/qOHtQgP
# ON/ef9dl+0Dum1l5z7O+sPiXBzZRJR7V+jZOsbf0A98fxqmuu3YacVrH6mY/Zmdx
# e1xexJt6PVXaIKtoZQKRVn2C0ITjXA==
# =AvaO
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 11 Jan 2023 07:51:17 GMT
# gpg:                using RSA key 522833AA75E2DCE6A24766C04DE1AF316D4F0DE9
# gpg: Good signature from "Klaus Jensen <its@irrelevant.dk>" [full]
# gpg:                 aka "Klaus Jensen <k.jensen@samsung.com>" [full]
# Primary key fingerprint: DDCA 4D9C 9EF9 31CC 3468 4272 63D5 6FC5 E55D A838
#      Subkey fingerprint: 5228 33AA 75E2 DCE6 A247 66C0 4DE1 AF31 6D4F 0DE9

* tag 'nvme-next-pull-request' of https://gitlab.com/birkelund/qemu:
  hw/nvme: cleanup error reporting in nvme_init_pci()
  hw/nvme: clean up confusing use of errp/local_err
  hw/nvme: fix missing cq eventidx update
  hw/nvme: fix missing endian conversions for doorbell buffers
  hw/nvme: rename shadow doorbell related trace events
  hw/nvme: use QOM accessors

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit a35fa426ee

Files changed:
  hw/nvme/ctrl.c (194 changed lines)
  hw/nvme/trace-events
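Most of the ctrl.c hunks below come from the "use QOM accessors" patch: direct pokes at the embedded parent object (&n->parent_obj) are replaced with the PCI_DEVICE(n) cast macro, and repeated casts within a function are hoisted into a local PCIDevice *pci variable. The stand-alone C sketch below only illustrates the embed-the-parent idea behind that macro; the *Stub types and PCI_DEVICE_STUB are invented for this example, and QEMU's real PCI_DEVICE() additionally performs a checked QOM type cast rather than a plain pointer conversion.

#include <stdio.h>

/* Simplified stand-ins for QEMU's PCIDevice/NvmeCtrl (hypothetical). */
typedef struct PCIDeviceStub {
    int devfn;
} PCIDeviceStub;

typedef struct NvmeCtrlStub {
    PCIDeviceStub parent_obj;   /* first member: the "parent" is embedded */
    int num_queues;
} NvmeCtrlStub;

/* Hypothetical accessor macro: converts a controller to its PCI device view.
 * QEMU's PCI_DEVICE() performs the same conversion via a checked QOM cast. */
#define PCI_DEVICE_STUB(obj) (&(obj)->parent_obj)

int main(void)
{
    NvmeCtrlStub n = { .parent_obj = { .devfn = 5 }, .num_queues = 8 };

    /* Old style: reach into the embedded field directly. */
    PCIDeviceStub *a = &n.parent_obj;

    /* New style: go through the accessor, which hides the layout detail. */
    PCIDeviceStub *b = PCI_DEVICE_STUB(&n);

    printf("same object: %d (devfn %d)\n", a == b, b->devfn);
    return 0;
}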
hw/nvme/ctrl.c:

@@ -449,7 +449,7 @@ static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
         return 0;
     }
 
-    return pci_dma_read(&n->parent_obj, addr, buf, size);
+    return pci_dma_read(PCI_DEVICE(n), addr, buf, size);
 }
 
 static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size)
@@ -469,7 +469,7 @@ static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, const void *buf, int size)
         return 0;
     }
 
-    return pci_dma_write(&n->parent_obj, addr, buf, size);
+    return pci_dma_write(PCI_DEVICE(n), addr, buf, size);
 }
 
 static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
@@ -514,24 +514,27 @@ static uint8_t nvme_sq_empty(NvmeSQueue *sq)
 
 static void nvme_irq_check(NvmeCtrl *n)
 {
+    PCIDevice *pci = PCI_DEVICE(n);
     uint32_t intms = ldl_le_p(&n->bar.intms);
 
-    if (msix_enabled(&(n->parent_obj))) {
+    if (msix_enabled(pci)) {
         return;
     }
     if (~intms & n->irq_status) {
-        pci_irq_assert(&n->parent_obj);
+        pci_irq_assert(pci);
     } else {
-        pci_irq_deassert(&n->parent_obj);
+        pci_irq_deassert(pci);
     }
 }
 
 static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
 {
+    PCIDevice *pci = PCI_DEVICE(n);
+
     if (cq->irq_enabled) {
-        if (msix_enabled(&(n->parent_obj))) {
+        if (msix_enabled(pci)) {
             trace_pci_nvme_irq_msix(cq->vector);
-            msix_notify(&(n->parent_obj), cq->vector);
+            msix_notify(pci, cq->vector);
         } else {
             trace_pci_nvme_irq_pin();
             assert(cq->vector < 32);
@@ -546,7 +549,7 @@ static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
 static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
 {
     if (cq->irq_enabled) {
-        if (msix_enabled(&(n->parent_obj))) {
+        if (msix_enabled(PCI_DEVICE(n))) {
             return;
         } else {
             assert(cq->vector < 32);
@@ -570,7 +573,7 @@ static void nvme_req_clear(NvmeRequest *req)
 static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
 {
     if (dma) {
-        pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0);
+        pci_dma_sglist_init(&sg->qsg, PCI_DEVICE(n), 0);
         sg->flags = NVME_SG_DMA;
     } else {
         qemu_iovec_init(&sg->iov, 0);
@@ -1331,11 +1334,24 @@ static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
     }
 }
 
+static void nvme_update_cq_eventidx(const NvmeCQueue *cq)
+{
+    uint32_t v = cpu_to_le32(cq->head);
+
+    trace_pci_nvme_update_cq_eventidx(cq->cqid, cq->head);
+
+    pci_dma_write(PCI_DEVICE(cq->ctrl), cq->ei_addr, &v, sizeof(v));
+}
+
 static void nvme_update_cq_head(NvmeCQueue *cq)
 {
-    pci_dma_read(&cq->ctrl->parent_obj, cq->db_addr, &cq->head,
-                 sizeof(cq->head));
-    trace_pci_nvme_shadow_doorbell_cq(cq->cqid, cq->head);
+    uint32_t v;
+
+    pci_dma_read(PCI_DEVICE(cq->ctrl), cq->db_addr, &v, sizeof(v));
+
+    cq->head = le32_to_cpu(v);
+
+    trace_pci_nvme_update_cq_head(cq->cqid, cq->head);
 }
 
 static void nvme_post_cqes(void *opaque)
@@ -1351,6 +1367,7 @@ static void nvme_post_cqes(void *opaque)
         hwaddr addr;
 
         if (n->dbbuf_enabled) {
+            nvme_update_cq_eventidx(cq);
             nvme_update_cq_head(cq);
         }
 
@@ -1363,7 +1380,7 @@
         req->cqe.sq_id = cpu_to_le16(sq->sqid);
         req->cqe.sq_head = cpu_to_le16(sq->head);
         addr = cq->dma_addr + cq->tail * n->cqe_size;
-        ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
+        ret = pci_dma_write(PCI_DEVICE(n), addr, (void *)&req->cqe,
                             sizeof(req->cqe));
         if (ret) {
             trace_pci_nvme_err_addr_write(addr);
@@ -4615,6 +4632,7 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
 
 static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
 {
+    PCIDevice *pci = PCI_DEVICE(n);
     uint16_t offset = (cq->cqid << 3) + (1 << 2);
 
     n->cq[cq->cqid] = NULL;
@@ -4625,8 +4643,8 @@ static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
         event_notifier_set_handler(&cq->notifier, NULL);
         event_notifier_cleanup(&cq->notifier);
     }
-    if (msix_enabled(&n->parent_obj)) {
-        msix_vector_unuse(&n->parent_obj, cq->vector);
+    if (msix_enabled(pci)) {
+        msix_vector_unuse(pci, cq->vector);
     }
     if (cq->cqid) {
         g_free(cq);
@@ -4664,8 +4682,10 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                          uint16_t cqid, uint16_t vector, uint16_t size,
                          uint16_t irq_enabled)
 {
-    if (msix_enabled(&n->parent_obj)) {
-        msix_vector_use(&n->parent_obj, vector);
+    PCIDevice *pci = PCI_DEVICE(n);
+
+    if (msix_enabled(pci)) {
+        msix_vector_use(pci, vector);
     }
     cq->ctrl = n;
     cq->cqid = cqid;
@@ -4716,7 +4736,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
         trace_pci_nvme_err_invalid_create_cq_addr(prp1);
         return NVME_INVALID_PRP_OFFSET | NVME_DNR;
     }
-    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
+    if (unlikely(!msix_enabled(PCI_DEVICE(n)) && vector)) {
         trace_pci_nvme_err_invalid_create_cq_vector(vector);
         return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
     }
@@ -5959,6 +5979,7 @@ static uint16_t nvme_assign_virt_res_to_sec(NvmeCtrl *n, NvmeRequest *req,
 
 static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online)
 {
+    PCIDevice *pci = PCI_DEVICE(n);
     NvmeCtrl *sn = NULL;
     NvmeSecCtrlEntry *sctrl;
     int vf_index;
@@ -5968,9 +5989,9 @@ static uint16_t nvme_virt_set_state(NvmeCtrl *n, uint16_t cntlid, bool online)
         return NVME_INVALID_CTRL_ID | NVME_DNR;
     }
 
-    if (!pci_is_vf(&n->parent_obj)) {
+    if (!pci_is_vf(pci)) {
         vf_index = le16_to_cpu(sctrl->vfn) - 1;
-        sn = NVME(pcie_sriov_get_vf_at_index(&n->parent_obj, vf_index));
+        sn = NVME(pcie_sriov_get_vf_at_index(pci, vf_index));
     }
 
     if (online) {
@@ -6028,6 +6049,7 @@ static uint16_t nvme_virt_mngmt(NvmeCtrl *n, NvmeRequest *req)
 
 static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
 {
+    PCIDevice *pci = PCI_DEVICE(n);
     uint64_t dbs_addr = le64_to_cpu(req->cmd.dptr.prp1);
     uint64_t eis_addr = le64_to_cpu(req->cmd.dptr.prp2);
     int i;
@@ -6054,8 +6076,7 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
              */
             sq->db_addr = dbs_addr + (i << 3);
             sq->ei_addr = eis_addr + (i << 3);
-            pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail,
-                          sizeof(sq->tail));
+            pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
 
             if (n->params.ioeventfd && sq->sqid != 0) {
                 if (!nvme_init_sq_ioeventfd(sq)) {
@@ -6068,8 +6089,7 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
             /* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */
             cq->db_addr = dbs_addr + (i << 3) + (1 << 2);
             cq->ei_addr = eis_addr + (i << 3) + (1 << 2);
-            pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head,
-                          sizeof(cq->head));
+            pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
 
             if (n->params.ioeventfd && cq->cqid != 0) {
                 if (!nvme_init_cq_ioeventfd(cq)) {
@@ -6141,16 +6161,22 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
 
 static void nvme_update_sq_eventidx(const NvmeSQueue *sq)
 {
-    pci_dma_write(&sq->ctrl->parent_obj, sq->ei_addr, &sq->tail,
-                  sizeof(sq->tail));
-    trace_pci_nvme_eventidx_sq(sq->sqid, sq->tail);
+    uint32_t v = cpu_to_le32(sq->tail);
+
+    trace_pci_nvme_update_sq_eventidx(sq->sqid, sq->tail);
+
+    pci_dma_write(PCI_DEVICE(sq->ctrl), sq->ei_addr, &v, sizeof(v));
 }
 
 static void nvme_update_sq_tail(NvmeSQueue *sq)
 {
-    pci_dma_read(&sq->ctrl->parent_obj, sq->db_addr, &sq->tail,
-                 sizeof(sq->tail));
-    trace_pci_nvme_shadow_doorbell_sq(sq->sqid, sq->tail);
+    uint32_t v;
+
+    pci_dma_read(PCI_DEVICE(sq->ctrl), sq->db_addr, &v, sizeof(v));
+
+    sq->tail = le32_to_cpu(v);
+
+    trace_pci_nvme_update_sq_tail(sq->sqid, sq->tail);
 }
 
 static void nvme_process_sq(void *opaque)
@@ -6216,7 +6242,7 @@ static void nvme_update_msixcap_ts(PCIDevice *pci_dev, uint32_t table_size)
 
 static void nvme_activate_virt_res(NvmeCtrl *n)
 {
-    PCIDevice *pci_dev = &n->parent_obj;
+    PCIDevice *pci_dev = PCI_DEVICE(n);
     NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
     NvmeSecCtrlEntry *sctrl;
 
@@ -6239,7 +6265,7 @@ static void nvme_activate_virt_res(NvmeCtrl *n)
 
 static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst)
 {
-    PCIDevice *pci_dev = &n->parent_obj;
+    PCIDevice *pci_dev = PCI_DEVICE(n);
     NvmeSecCtrlEntry *sctrl;
     NvmeNamespace *ns;
     int i;
@@ -6356,7 +6382,7 @@ static int nvme_start_ctrl(NvmeCtrl *n)
     uint32_t page_size = 1 << page_bits;
     NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
 
-    if (pci_is_vf(&n->parent_obj) && !sctrl->scs) {
+    if (pci_is_vf(PCI_DEVICE(n)) && !sctrl->scs) {
         trace_pci_nvme_err_startfail_virt_state(le16_to_cpu(sctrl->nvi),
                                                 le16_to_cpu(sctrl->nvq),
                                                 sctrl->scs ? "ONLINE" :
@@ -6471,6 +6497,7 @@ static void nvme_cmb_enable_regs(NvmeCtrl *n)
 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                            unsigned size)
 {
+    PCIDevice *pci = PCI_DEVICE(n);
     uint64_t cap = ldq_le_p(&n->bar.cap);
     uint32_t cc = ldl_le_p(&n->bar.cc);
     uint32_t intms = ldl_le_p(&n->bar.intms);
@@ -6494,7 +6521,7 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
 
     switch (offset) {
     case NVME_REG_INTMS:
-        if (unlikely(msix_enabled(&(n->parent_obj)))) {
+        if (unlikely(msix_enabled(pci))) {
             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                            "undefined access to interrupt mask set"
                            " when MSI-X is enabled");
@@ -6507,7 +6534,7 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
         nvme_irq_check(n);
         break;
     case NVME_REG_INTMC:
-        if (unlikely(msix_enabled(&(n->parent_obj)))) {
+        if (unlikely(msix_enabled(pci))) {
             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                            "undefined access to interrupt mask clr"
                            " when MSI-X is enabled");
@@ -6732,7 +6759,7 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
         return 0;
     }
 
-    if (pci_is_vf(&n->parent_obj) && !nvme_sctrl(n)->scs &&
+    if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs &&
         addr != NVME_REG_CSTS) {
         trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size);
         return 0;
@@ -6753,6 +6780,7 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
 
 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
 {
+    PCIDevice *pci = PCI_DEVICE(n);
     uint32_t qid;
 
     if (unlikely(addr & ((1 << 2) - 1))) {
@@ -6820,8 +6848,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
         start_sqs = nvme_cq_full(cq) ? 1 : 0;
         cq->head = new_head;
         if (!qid && n->dbbuf_enabled) {
-            pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head,
-                          sizeof(cq->head));
+            pci_dma_write(pci, cq->db_addr, &cq->head, sizeof(cq->head));
         }
         if (start_sqs) {
             NvmeSQueue *sq;
@@ -6894,8 +6921,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
              * including ones that run on Linux, are not updating Admin Queues,
              * so we can't trust reading it for an appropriate sq tail.
              */
-            pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail,
-                          sizeof(sq->tail));
+            pci_dma_write(pci, sq->db_addr, &sq->tail, sizeof(sq->tail));
         }
 
         qemu_bh_schedule(sq->bh);
@@ -6909,7 +6935,7 @@ static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
 
     trace_pci_nvme_mmio_write(addr, data, size);
 
-    if (pci_is_vf(&n->parent_obj) && !nvme_sctrl(n)->scs &&
+    if (pci_is_vf(PCI_DEVICE(n)) && !nvme_sctrl(n)->scs &&
         addr != NVME_REG_CSTS) {
         trace_pci_nvme_err_ignored_mmio_vf_offline(addr, size);
         return;
@@ -6955,7 +6981,7 @@ static const MemoryRegionOps nvme_cmb_ops = {
     },
 };
 
-static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
+static bool nvme_check_params(NvmeCtrl *n, Error **errp)
 {
     NvmeParams *params = &n->params;
 
@@ -6969,38 +6995,38 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
     if (n->namespace.blkconf.blk && n->subsys) {
         error_setg(errp, "subsystem support is unavailable with legacy "
                    "namespace ('drive' property)");
-        return;
+        return false;
     }
 
     if (params->max_ioqpairs < 1 ||
         params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
         error_setg(errp, "max_ioqpairs must be between 1 and %d",
                    NVME_MAX_IOQPAIRS);
-        return;
+        return false;
     }
 
     if (params->msix_qsize < 1 ||
         params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
         error_setg(errp, "msix_qsize must be between 1 and %d",
                    PCI_MSIX_FLAGS_QSIZE + 1);
-        return;
+        return false;
     }
 
     if (!params->serial) {
         error_setg(errp, "serial property not set");
-        return;
+        return false;
     }
 
     if (n->pmr.dev) {
         if (host_memory_backend_is_mapped(n->pmr.dev)) {
             error_setg(errp, "can't use already busy memdev: %s",
                        object_get_canonical_path_component(OBJECT(n->pmr.dev)));
-            return;
+            return false;
         }
 
         if (!is_power_of_2(n->pmr.dev->size)) {
             error_setg(errp, "pmr backend size needs to be power of 2 in size");
-            return;
+            return false;
         }
 
         host_memory_backend_set_mapped(n->pmr.dev, true);
@@ -7009,64 +7035,64 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
     if (n->params.zasl > n->params.mdts) {
         error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less "
                    "than or equal to mdts (Maximum Data Transfer Size)");
-        return;
+        return false;
     }
 
     if (!n->params.vsl) {
         error_setg(errp, "vsl must be non-zero");
-        return;
+        return false;
     }
 
     if (params->sriov_max_vfs) {
         if (!n->subsys) {
             error_setg(errp, "subsystem is required for the use of SR-IOV");
-            return;
+            return false;
         }
 
         if (params->sriov_max_vfs > NVME_MAX_VFS) {
            error_setg(errp, "sriov_max_vfs must be between 0 and %d",
                       NVME_MAX_VFS);
-            return;
+            return false;
         }
 
         if (params->cmb_size_mb) {
            error_setg(errp, "CMB is not supported with SR-IOV");
-            return;
+            return false;
        }
 
        if (n->pmr.dev) {
            error_setg(errp, "PMR is not supported with SR-IOV");
-            return;
+            return false;
        }
 
        if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) {
            error_setg(errp, "both sriov_vq_flexible and sriov_vi_flexible"
                       " must be set for the use of SR-IOV");
-            return;
+            return false;
        }
 
        if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) {
            error_setg(errp, "sriov_vq_flexible must be greater than or equal"
                       " to %d (sriov_max_vfs * 2)", params->sriov_max_vfs * 2);
-            return;
+            return false;
        }
 
        if (params->max_ioqpairs < params->sriov_vq_flexible + 2) {
            error_setg(errp, "(max_ioqpairs - sriov_vq_flexible) must be"
                       " greater than or equal to 2");
-            return;
+            return false;
        }
 
        if (params->sriov_vi_flexible < params->sriov_max_vfs) {
            error_setg(errp, "sriov_vi_flexible must be greater than or equal"
                       " to %d (sriov_max_vfs)", params->sriov_max_vfs);
-            return;
+            return false;
        }
 
        if (params->msix_qsize < params->sriov_vi_flexible + 1) {
            error_setg(errp, "(msix_qsize - sriov_vi_flexible) must be"
                       " greater than or equal to 1");
-            return;
+            return false;
        }
 
        if (params->sriov_max_vi_per_vf &&
@@ -7074,7 +7100,7 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
            error_setg(errp, "sriov_max_vi_per_vf must meet:"
                       " (sriov_max_vi_per_vf - 1) %% %d == 0 and"
                       " sriov_max_vi_per_vf >= 1", NVME_VF_RES_GRANULARITY);
-            return;
+            return false;
        }
 
        if (params->sriov_max_vq_per_vf &&
@@ -7083,9 +7109,11 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
            error_setg(errp, "sriov_max_vq_per_vf must meet:"
                       " (sriov_max_vq_per_vf - 1) %% %d == 0 and"
                       " sriov_max_vq_per_vf >= 2", NVME_VF_RES_GRANULARITY);
-            return;
+            return false;
        }
    }
+
+    return true;
 }
 
 static void nvme_init_state(NvmeCtrl *n)
@@ -7093,10 +7121,11 @@ static void nvme_init_state(NvmeCtrl *n)
     NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
     NvmeSecCtrlList *list = &n->sec_ctrl_list;
     NvmeSecCtrlEntry *sctrl;
+    PCIDevice *pci = PCI_DEVICE(n);
     uint8_t max_vfs;
     int i;
 
-    if (pci_is_vf(&n->parent_obj)) {
+    if (pci_is_vf(pci)) {
         sctrl = nvme_sctrl(n);
         max_vfs = 0;
         n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0;
@@ -7125,7 +7154,7 @@ static void nvme_init_state(NvmeCtrl *n)
     cap->cntlid = cpu_to_le16(n->cntlid);
     cap->crt = NVME_CRT_VQ | NVME_CRT_VI;
 
-    if (pci_is_vf(&n->parent_obj)) {
+    if (pci_is_vf(pci)) {
         cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs);
     } else {
         cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs -
@@ -7138,7 +7167,7 @@ static void nvme_init_state(NvmeCtrl *n)
                         cap->vqfrt / MAX(max_vfs, 1);
     }
 
-    if (pci_is_vf(&n->parent_obj)) {
+    if (pci_is_vf(pci)) {
         cap->viprt = cpu_to_le16(n->conf_msix_qsize);
     } else {
         cap->viprt = cpu_to_le16(n->params.msix_qsize -
@@ -7261,15 +7290,14 @@ static int nvme_add_pm_capability(PCIDevice *pci_dev, uint8_t offset)
     return 0;
 }
 
-static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
+static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
 {
+    ERRP_GUARD();
     uint8_t *pci_conf = pci_dev->config;
     uint64_t bar_size;
     unsigned msix_table_offset, msix_pba_offset;
     int ret;
 
-    Error *err = NULL;
-
     pci_conf[PCI_INTERRUPT_PIN] = 1;
     pci_config_set_prog_interface(pci_conf, 0x2);
 
@@ -7306,14 +7334,14 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
     }
     ret = msix_init(pci_dev, n->params.msix_qsize,
                     &n->bar0, 0, msix_table_offset,
-                    &n->bar0, 0, msix_pba_offset, 0, &err);
-    if (ret < 0) {
-        if (ret == -ENOTSUP) {
-            warn_report_err(err);
-        } else {
-            error_propagate(errp, err);
-            return ret;
-        }
+                    &n->bar0, 0, msix_pba_offset, 0, errp);
+    if (ret == -ENOTSUP) {
+        /* report that msix is not supported, but do not error out */
+        warn_report_err(*errp);
+        *errp = NULL;
+    } else if (ret < 0) {
+        /* propagate error to caller */
+        return false;
     }
 
     nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
@@ -7330,7 +7358,7 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
         nvme_init_sriov(n, pci_dev, 0x120);
     }
 
-    return 0;
+    return true;
 }
 
 static void nvme_init_subnqn(NvmeCtrl *n)
@@ -7445,7 +7473,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
     stl_le_p(&n->bar.vs, NVME_SPEC_VER);
     n->bar.intmc = n->bar.intms = 0;
 
-    if (pci_is_vf(&n->parent_obj) && !sctrl->scs) {
+    if (pci_is_vf(pci_dev) && !sctrl->scs) {
         stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
     }
 }
@@ -7483,8 +7511,8 @@ void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns)
 static void nvme_realize(PCIDevice *pci_dev, Error **errp)
 {
     NvmeCtrl *n = NVME(pci_dev);
+    DeviceState *dev = DEVICE(pci_dev);
     NvmeNamespace *ns;
-    Error *local_err = NULL;
     NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev));
 
     if (pci_is_vf(pci_dev)) {
@@ -7496,21 +7524,17 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
         n->subsys = pn->subsys;
     }
 
-    nvme_check_constraints(n, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
+    if (!nvme_check_params(n, errp)) {
         return;
     }
 
-    qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
-              &pci_dev->qdev, n->parent_obj.qdev.id);
+    qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id);
 
     if (nvme_init_subsys(n, errp)) {
-        error_propagate(errp, local_err);
         return;
     }
     nvme_init_state(n);
-    if (nvme_init_pci(n, pci_dev, errp)) {
+    if (!nvme_init_pci(n, pci_dev, errp)) {
        return;
    }
    nvme_init_ctrl(n, pci_dev);
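The nvme_update_cq_head()/nvme_update_sq_tail() and eventidx hunks above implement the "fix missing endian conversions for doorbell buffers" patch: the shadow doorbell and eventidx buffers are little-endian guest memory, so the model now DMAs into a temporary and converts with le32_to_cpu()/cpu_to_le32() instead of reading straight into the host-endian queue fields. The related "fix missing cq eventidx update" hunk additionally makes nvme_post_cqes() refresh the CQ eventidx before it reads the shadow head. Below is a minimal, self-contained sketch of why the conversion matters; le32_to_cpu_stub and the fake guest buffer are invented for illustration and stand in for QEMU's byte-order helpers and pci_dma_read().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for le32_to_cpu(): interpret 4 bytes stored little-endian (as the
 * NVMe shadow doorbell buffer is) as a host integer, independent of the
 * host's byte order. */
static uint32_t le32_to_cpu_stub(uint32_t le)
{
    const uint8_t *p = (const uint8_t *)&le;
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* Pretend this is the guest-visible shadow doorbell buffer: the value
     * 0x1234 stored little-endian. */
    uint8_t guest_buf[4] = { 0x34, 0x12, 0x00, 0x00 };

    uint32_t v;
    memcpy(&v, guest_buf, sizeof(v));      /* like pci_dma_read() into a temp */

    uint32_t tail = le32_to_cpu_stub(v);   /* explicit conversion, as in the fix */
    printf("sq tail = 0x%x\n", (unsigned)tail);  /* 0x1234 on any host */
    return 0;
}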
hw/nvme/trace-events:

@@ -84,8 +84,8 @@ pci_nvme_enqueue_event_noqueue(int queued) "queued %d"
 pci_nvme_enqueue_event_masked(uint8_t typ) "type 0x%"PRIx8""
 pci_nvme_no_outstanding_aers(void) "ignoring event; no outstanding AERs"
 pci_nvme_enqueue_req_completion(uint16_t cid, uint16_t cqid, uint32_t dw0, uint32_t dw1, uint16_t status) "cid %"PRIu16" cqid %"PRIu16" dw0 0x%"PRIx32" dw1 0x%"PRIx32" status 0x%"PRIx16""
-pci_nvme_eventidx_cq(uint16_t cqid, uint16_t new_eventidx) "cqid %"PRIu16" new_eventidx %"PRIu16""
-pci_nvme_eventidx_sq(uint16_t sqid, uint16_t new_eventidx) "sqid %"PRIu16" new_eventidx %"PRIu16""
+pci_nvme_update_cq_eventidx(uint16_t cqid, uint16_t new_eventidx) "cqid %"PRIu16" new_eventidx %"PRIu16""
+pci_nvme_update_sq_eventidx(uint16_t sqid, uint16_t new_eventidx) "sqid %"PRIu16" new_eventidx %"PRIu16""
 pci_nvme_mmio_read(uint64_t addr, unsigned size) "addr 0x%"PRIx64" size %d"
 pci_nvme_mmio_write(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d"
 pci_nvme_mmio_doorbell_cq(uint16_t cqid, uint16_t new_head) "cqid %"PRIu16" new_head %"PRIu16""
@@ -102,8 +102,8 @@ pci_nvme_mmio_start_success(void) "setting controller enable bit succeeded"
 pci_nvme_mmio_stopped(void) "cleared controller enable bit"
 pci_nvme_mmio_shutdown_set(void) "shutdown bit set"
 pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
-pci_nvme_shadow_doorbell_cq(uint16_t cqid, uint16_t new_shadow_doorbell) "cqid %"PRIu16" new_shadow_doorbell %"PRIu16""
-pci_nvme_shadow_doorbell_sq(uint16_t sqid, uint16_t new_shadow_doorbell) "sqid %"PRIu16" new_shadow_doorbell %"PRIu16""
+pci_nvme_update_cq_head(uint16_t cqid, uint16_t new_head) "cqid %"PRIu16" new_head %"PRIu16""
+pci_nvme_update_sq_tail(uint16_t sqid, uint16_t new_tail) "sqid %"PRIu16" new_tail %"PRIu16""
 pci_nvme_open_zone(uint64_t slba, uint32_t zone_idx, int all) "open zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32""
 pci_nvme_close_zone(uint64_t slba, uint32_t zone_idx, int all) "close zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32""
 pci_nvme_finish_zone(uint64_t slba, uint32_t zone_idx, int all) "finish zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32""
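The nvme_check_params()/nvme_init_pci() changes in ctrl.c follow the "cleanup error reporting" and "clean up confusing use of errp/local_err" patches: functions that take an Error **errp now also return a bool (and keep errp usable via ERRP_GUARD()), so nvme_realize() can simply test the return value instead of shuffling a local_err through error_propagate(). A rough, self-contained sketch of that calling convention follows; the Error struct, error_setg_stub() and check_params_stub() are simplified stand-ins, not QEMU's real error API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Very simplified stand-in for QEMU's Error object and error_setg(). */
typedef struct Error {
    const char *msg;
} Error;

static void error_setg_stub(Error **errp, const char *msg)
{
    if (errp) {
        Error *err = malloc(sizeof(*err));
        err->msg = msg;
        *errp = err;
    }
}

/* The pattern the series moves to: report through errp *and* return a bool,
 * so callers test the return value instead of propagating a local_err. */
static bool check_params_stub(int max_ioqpairs, Error **errp)
{
    if (max_ioqpairs < 1) {
        error_setg_stub(errp, "max_ioqpairs must be at least 1");
        return false;
    }
    return true;
}

int main(void)
{
    Error *err = NULL;

    if (!check_params_stub(0, &err)) {   /* caller: no error_propagate() needed */
        fprintf(stderr, "realize failed: %s\n", err->msg);
        free(err);
        return 1;
    }
    return 0;
}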