mirror of
https://github.com/qemu/qemu.git
synced 2024-12-06 02:03:38 +08:00
6a25a4b42e
Decouple the requested maximum number of ioqpairs (param max_ioqpairs)
from the number of MSI-X interrupt vectors by introducing a new
msix_qsize parameter and initializing MSI-X with that. This allows
emulating a device that has fewer vectors than I/O queue pairs, and it
also allows more than 2048 queue pairs. To keep the device behaving as
before, msix_qsize defaults to 65 (the default max_ioqpairs + 1).

This decoupling was actually suggested by Maxim some time ago in a
slightly different context, so a Suggested-by is added.

Suggested-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Message-Id: <20200609190333.59390-22-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
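
As a rough illustration of the decoupling described above, here is a minimal sketch assuming QEMU's qdev property macros and MSI-X API; nvme_props and nvme_init_pci_sketch are hypothetical names, and the actual patch may wire this up differently:

static Property nvme_props[] = {
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    /* New: MSI-X vector count, defaulting to default max_ioqpairs + 1 = 65 */
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_END_OF_LIST(),
};

static void nvme_init_pci_sketch(NvmeCtrl *n, PCIDevice *pci_dev)
{
    /* Size MSI-X by msix_qsize rather than by max_ioqpairs + 1, so the
     * number of queue pairs no longer bounds the vector count. */
    msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, NULL);
}
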
119 lines
2.9 KiB
C
#ifndef HW_NVME_H
#define HW_NVME_H

#include "block/nvme.h"

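/* User-configurable device properties (-device nvme,...); see the commit
 * message above for how max_ioqpairs and msix_qsize relate. */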
typedef struct NvmeParams {
    char     *serial;
    uint32_t num_queues; /* deprecated since 5.1 */
    uint32_t max_ioqpairs;
    uint16_t msix_qsize;
    uint32_t cmb_size_mb;
} NvmeParams;

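/* An asynchronous event queued until it can complete an outstanding
 * Asynchronous Event Request command. */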
typedef struct NvmeAsyncEvent {
    QSIMPLEQ_ENTRY(NvmeAsyncEvent) entry;
    NvmeAerResult result;
} NvmeAsyncEvent;

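/* Per-command state for a request in flight on a submission queue. */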
typedef struct NvmeRequest {
    struct NvmeSQueue       *sq;
    BlockAIOCB              *aiocb;
    uint16_t                status;
    bool                    has_sg;
    NvmeCqe                 cqe;
    BlockAcctCookie         acct;
    QEMUSGList              qsg;
    QEMUIOVector            iov;
    QTAILQ_ENTRY(NvmeRequest) entry;
} NvmeRequest;

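/* Submission queue state; commands are fetched from guest memory at
 * dma_addr and processed from a QEMUTimer callback. */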
typedef struct NvmeSQueue {
    struct NvmeCtrl *ctrl;
    uint16_t    sqid;
    uint16_t    cqid;
    uint32_t    head;
    uint32_t    tail;
    uint32_t    size;
    uint64_t    dma_addr;
    QEMUTimer   *timer;
    NvmeRequest *io_req;
    QTAILQ_HEAD(, NvmeRequest) req_list;
    QTAILQ_HEAD(, NvmeRequest) out_req_list;
    QTAILQ_ENTRY(NvmeSQueue) entry;
} NvmeSQueue;

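/* Completion queue state; completion entries are posted to guest memory at
 * dma_addr, raising the interrupt vector in 'vector' when irq_enabled. */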
typedef struct NvmeCQueue {
    struct NvmeCtrl *ctrl;
    uint8_t     phase;
    uint16_t    cqid;
    uint16_t    irq_enabled;
    uint32_t    head;
    uint32_t    tail;
    uint32_t    vector;
    uint32_t    size;
    uint64_t    dma_addr;
    QEMUTimer   *timer;
    QTAILQ_HEAD(, NvmeSQueue) sq_list;
    QTAILQ_HEAD(, NvmeRequest) req_list;
} NvmeCQueue;

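/* Per-namespace state; currently just the Identify Namespace data. */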
typedef struct NvmeNamespace {
    NvmeIdNs id_ns;
} NvmeNamespace;

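/* Return the LBA format selected by the FLBAS field of the identify data. */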
static inline NvmeLBAF *nvme_ns_lbaf(NvmeNamespace *ns)
{
    NvmeIdNs *id_ns = &ns->id_ns;
    return &id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(id_ns->flbas)];
}

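/* LBA data size (LBADS) of the current format, as a power of two. */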
static inline uint8_t nvme_ns_lbads(NvmeNamespace *ns)
{
    return nvme_ns_lbaf(ns)->ds;
}

#define TYPE_NVME "nvme"
#define NVME(obj) \
        OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)

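/* Top-level controller state: PCI device, MMIO regions, BAR registers,
 * parameters, namespaces, and the admin and I/O queues. */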
typedef struct NvmeCtrl {
    PCIDevice    parent_obj;
    MemoryRegion iomem;
    MemoryRegion ctrl_mem;
    NvmeBar      bar;
    BlockConf    conf;
    NvmeParams   params;

    uint32_t    page_size;
    uint16_t    page_bits;
    uint16_t    max_prp_ents;
    uint16_t    cqe_size;
    uint16_t    sqe_size;
    uint32_t    reg_size;
    uint32_t    num_namespaces;
    uint32_t    max_q_ents;
    uint64_t    ns_size;
    uint8_t     *cmbuf;
    uint32_t    irq_status;
    uint64_t    host_timestamp;              /* Timestamp sent by the host */
    uint64_t    timestamp_set_qemu_clock_ms; /* QEMU clock time */

    HostMemoryBackend *pmrdev;

    NvmeNamespace   *namespaces;
    NvmeSQueue      **sq;
    NvmeCQueue      **cq;
    NvmeSQueue      admin_sq;
    NvmeCQueue      admin_cq;
    NvmeIdCtrl      id_ctrl;
} NvmeCtrl;

/* Calculate the number of LBAs that the namespace can accommodate. */
static inline uint64_t nvme_ns_nlbas(NvmeCtrl *n, NvmeNamespace *ns)
{
    return n->ns_size >> nvme_ns_lbads(ns);
}

#endif /* HW_NVME_H */
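
For reference, a minimal usage sketch of the inline helpers above. This is hypothetical and not part of the header; it assumes the header is included as "nvme.h" and that n and ns have been fully initialized by the device:

#include <stdio.h>
#include <inttypes.h>

#include "nvme.h"

/* Hypothetical debug helper: report the geometry that nvme_ns_lbads() and
 * nvme_ns_nlbas() derive from the identify data. */
static void nvme_ns_print_geometry(NvmeCtrl *n, NvmeNamespace *ns)
{
    uint8_t  lbads    = nvme_ns_lbads(ns);    /* log2 of the LBA size */
    uint64_t lba_size = 1ULL << lbads;        /* bytes per LBA */
    uint64_t nlbas    = nvme_ns_nlbas(n, ns); /* LBAs that fit in ns_size */

    printf("LBA size: %" PRIu64 " bytes, capacity: %" PRIu64 " LBAs\n",
           lba_size, nlbas);
}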