linux/drivers/mmc/host/mmc_hsq.h
Michael Wu e026a3f917 mmc: mmc-hsq: Use fifo to dispatch mmc_request
The current next_tag selection can add a large delay to some requests
and defeat the scheduling decisions made by the block layer, because
the tags of issued mrqs are not guaranteed to arrive in order,
especially when the IO load is heavy. In a fio 4k random read test, we
found that a request handed to mmc_hsq waited nearly 200ms before
request_atomic was called for it, while mmc_hsq had already processed
thousands of other requests in the meantime. Use a fifo here to
preserve the first-in, first-out order of requests and avoid adding
extra delay (a sketch of the fifo bookkeeping follows below).

Reviewed-by: Wenchao Chen <wenchao.chen@unisoc.com>
Signed-off-by: Michael Wu <michael@allwinnertech.com>
Link: https://lore.kernel.org/r/20221128093847.22768-1-michael@allwinnertech.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2022-12-07 13:29:14 +01:00
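
To make the FIFO behavior concrete, here is a minimal standalone sketch of the tag bookkeeping the commit describes: tag_slot[] is used as a singly linked list running from next_tag (the head) to tail_tag (the tail), so tags are dispatched in arrival order even when the block layer hands them out non-sequentially. The field names match the header below, but struct hsq_fifo, the fifo_enqueue_tag()/fifo_dequeue_tag() helpers, and the userspace main() harness are illustrative assumptions, not driver code.

#include <stdio.h>

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

/* Just the FIFO-related fields from struct mmc_hsq (hypothetical struct). */
struct hsq_fifo {
	int next_tag;			/* tag to dispatch next (head) */
	int tail_tag;			/* most recently queued tag (tail) */
	int tag_slot[HSQ_NUM_SLOTS];	/* tag_slot[t] = tag queued after t */
};

/* Hypothetical helper: link a new tag at the tail of the FIFO. */
static void fifo_enqueue_tag(struct hsq_fifo *f, int tag)
{
	if (f->next_tag == HSQ_INVALID_TAG) {
		/* Queue was empty: the new tag is both head and tail. */
		f->next_tag = tag;
		f->tail_tag = tag;
	} else {
		/* Append behind the current tail. */
		f->tag_slot[f->tail_tag] = tag;
		f->tail_tag = tag;
	}
	f->tag_slot[tag] = HSQ_INVALID_TAG;
}

/* Hypothetical helper: pop the head tag; returns HSQ_INVALID_TAG if empty. */
static int fifo_dequeue_tag(struct hsq_fifo *f)
{
	int tag = f->next_tag;

	if (tag == HSQ_INVALID_TAG)
		return tag;

	f->next_tag = f->tag_slot[tag];
	if (f->next_tag == HSQ_INVALID_TAG)
		f->tail_tag = HSQ_INVALID_TAG;
	return tag;
}

int main(void)
{
	struct hsq_fifo f = { .next_tag = HSQ_INVALID_TAG,
			      .tail_tag = HSQ_INVALID_TAG };
	int tags[] = { 7, 3, 42 };	/* out-of-order block-layer tags */
	int i, t;

	/* Enqueue in arrival order... */
	for (i = 0; i < 3; i++)
		fifo_enqueue_tag(&f, tags[i]);

	/* ...and dequeue in exactly the same order: 7, 3, 42. */
	while ((t = fifo_dequeue_tag(&f)) != HSQ_INVALID_TAG)
		printf("dispatch tag %d\n", t);

	return 0;
}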

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MMC_HSQ_H
#define LINUX_MMC_HSQ_H

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

struct hsq_slot {
	struct mmc_request *mrq;
};

struct mmc_hsq {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	wait_queue_head_t wait_queue;
	struct hsq_slot *slot;
	spinlock_t lock;
	struct work_struct retry_work;

	/*
	 * FIFO dispatch order: next_tag is the head, tail_tag the tail,
	 * and tag_slot[t] holds the tag queued immediately after tag t.
	 */
	int next_tag;
	int num_slots;
	int qcnt;
	int tail_tag;
	int tag_slot[HSQ_NUM_SLOTS];

	bool enabled;
	bool waiting_for_idle;
	bool recovery_halt;
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc);
void mmc_hsq_suspend(struct mmc_host *mmc);
int mmc_hsq_resume(struct mmc_host *mmc);
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq);

#endif
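
For context, a host driver enables hsq roughly as follows: allocate a struct mmc_hsq, call mmc_hsq_init() at probe time, and let its completion path hand finished requests to mmc_hsq_finalize_request() so the next queued request is issued. The sketch below is modeled on how existing users wire this up; the my_probe_hsq()/my_request_done() names and the surrounding driver structure are assumptions, not code from any particular driver.

#include <linux/device.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

#include "mmc_hsq.h"

static int my_probe_hsq(struct device *dev, struct mmc_host *mmc)
{
	struct mmc_hsq *hsq;

	hsq = devm_kzalloc(dev, sizeof(*hsq), GFP_KERNEL);
	if (!hsq)
		return -ENOMEM;

	/* Hooks the host up to the hsq implementation via mmc->cqe_ops. */
	return mmc_hsq_init(hsq, mmc);
}

/* Called from the driver's completion path (e.g. IRQ bottom half). */
static void my_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
{
	/*
	 * Let hsq finalize the request and kick off the next queued one;
	 * it returns true if the mrq belonged to hsq.
	 */
	if (!mmc_hsq_finalize_request(mmc, mrq))
		mmc_request_done(mmc, mrq);	/* non-hsq fallback */
}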