commit 68df98c483

By dynamically adjusting host->hsq_depth, based upon the buffer size being 4k and on there being at least two I/O write requests in flight, we can improve the throughput a bit. This is typical for a random I/O write pattern. More precisely, by dynamically changing the number of requests in flight from 2 to 5, we can observe a ~4-5% increase in throughput on some platforms.

Signed-off-by: Wenchao Chen <wenchao.chen@unisoc.com>
Link: https://lore.kernel.org/r/20230919074707.25517-3-wenchao.chen@unisoc.com
[Ulf: Re-wrote the commit message, minor adjustment to the code - all to clarify.]
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
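The adjustment the commit describes can be sketched as follows. This is an illustrative sketch rather than a verbatim quote of the patch: the function name mmc_hsq_modify_threshold is an assumption, while host->hsq_depth, HSQ_NORMAL_DEPTH, HSQ_PERFORMANCE_DEPTH, HSQ_NUM_SLOTS and the slot array all appear in the commit message or in the header below. The idea is to rescan the in-flight slots and, once at least two pending 4k write requests are found, raise the queue depth from 2 to 5.

/*
 * Sketch (assumed function name): recompute the allowed queue depth.
 * Default to HSQ_NORMAL_DEPTH; if at least two pending requests are
 * 4k writes, bump the depth to HSQ_PERFORMANCE_DEPTH so that more
 * random writes can be kept in flight.
 */
static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct mmc_request *mrq;
	unsigned int need_change = 0;
	int tag;

	mmc->hsq_depth = HSQ_NORMAL_DEPTH;
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		mrq = hsq->slot[tag].mrq;
		if (mrq && mrq->data &&
		    (mrq->data->blksz * mrq->data->blocks == 4096) &&
		    (mrq->data->flags & MMC_DATA_WRITE) &&
		    ++need_change == 2) {
			mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
			break;
		}
	}
}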
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MMC_HSQ_H
#define LINUX_MMC_HSQ_H

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

/*
 * For MMC host software queue, we only allow 2 requests in
 * flight to avoid a long latency.
 */
#define HSQ_NORMAL_DEPTH	2
/*
 * For 4k random writes, we allow hsq_depth to increase to 5
 * for better performance.
 */
#define HSQ_PERFORMANCE_DEPTH	5

struct hsq_slot {
	struct mmc_request *mrq;
};

struct mmc_hsq {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	wait_queue_head_t wait_queue;
	struct hsq_slot *slot;
	spinlock_t lock;
	struct work_struct retry_work;

	int next_tag;
	int num_slots;
	int qcnt;
	int tail_tag;
	int tag_slot[HSQ_NUM_SLOTS];

	bool enabled;
	bool waiting_for_idle;
	bool recovery_halt;
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc);
void mmc_hsq_suspend(struct mmc_host *mmc);
int mmc_hsq_resume(struct mmc_host *mmc);
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq);

#endif
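For context, a host driver opts into the software queue by allocating a struct mmc_hsq and registering it with mmc_hsq_init(), whose signature is declared above. Below is a minimal sketch of that wiring; the function name example_probe_hsq and its placement in a platform-device probe path are assumptions, not code from a specific driver.

/*
 * Sketch of probe-time setup (assumed helper name): allocate the hsq
 * context with device-managed memory and bind it to the MMC host.
 */
static int example_probe_hsq(struct platform_device *pdev,
			     struct mmc_host *mmc)
{
	struct mmc_hsq *hsq;

	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
	if (!hsq)
		return -ENOMEM;

	/* Binds the hsq context to the host and enables the queue. */
	return mmc_hsq_init(hsq, mmc);
}

At completion time the driver hands finished requests back through mmc_hsq_finalize_request(), which returns true when the request belonged to the software queue, letting the queue dispatch the next pending slot.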