commit b226c9e1f4

tag for-linus-20191115
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl3O/gkQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpsLBD/47jITnsOf/EU1gqW8vbl+psrPYQN+p68id
EA5L8fqF7wHg/Anxg9MApDO6noH8BvnfSGFnqxWoE5YcvT/mfj4pVciLMiNG2BwA
hUiJCwIG8SGCn2MRbaTQpqRnMw8aoTKdJAUWwjZTl/db+X9aCv++Odn4XuAABfh2
LxIb0ZZBF5M8CfKRHtksuCcGBftEUTrlCzSZ9dXI5tD8EpRNJw/5LDGB6w7inhcZ
0+X7ENdSQrMKA9ImJunLPUDFejHu4fr4qJdAX67Qai0Wf2dR54eaXmTVO4d4SGcU
UX0zpNC6bozCq+X/ICnlJkK+ECuR33xFLRIS0S7Xv2Er6n3Ul8N6cb6RRv8Q+o1h
XG5NfpOH+Atqmdyp9zSRI2c2UVfIfmvmRVIUFM+ZXmdw5oSfUltGLdyNVnKuhzc+
f2Y3dti96YnT35TIihKcwfqlFuaXfLfCmLYabtVylwlOJ80Sjhgea3IyvwstpJau
uIs5X8Z5AdBuqufPj4veS3x73DeE7slGmzADcNtUeFb1K5423MJqlQUOeVeJW3x3
85tS7aot/SoMnA1dtREvceerFP/lIa/02iqX0TYQ7BqsN5oZjQzaiuJkUfV2WNOs
3TlNRBKF69tpX4+NXxaSm5kC0YHtHIWF0EtNliKM7Yi8WS0tVsy74pDO7otj3j1m
s10Rr/1seA==
=wP5w
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20191115' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should make it into this release. This contains:

  - io_uring:
      - The timeout command assumes sequence == 0 means that we want
        one completion, but this kind of overloading is unfortunate as
        it prevents users from doing a pure time-based wait. Since this
        operation was introduced in this cycle, let's correct it now,
        while we can. (me)
      - One-liner to fix an issue with dependent links and fixed buffer
        reads. The actual IO completed fine, but the link got severed
        since we stored the wrong expected value. (me)
      - Add TIMEOUT to the list of opcodes that don't need a file. (Pavel)

  - rsxx: missing workqueue destroy calls. Old bug. (Chuhong)

  - Fix blk-iocost active list check (Jiufei)

  - Fix an impossible-to-hit overflow merge condition that still hit
    some folks very rarely (Junichi)

  - Fix a bfq hang issue from 5.3. This didn't get marked for stable,
    but will go into stable post this merge (Paolo)"

* tag 'for-linus-20191115' of git://git.kernel.dk/linux-block:
  rsxx: add missed destroy_workqueue calls in remove
  iocost: check active_list of all the ancestors in iocg_activate()
  block, bfq: deschedule empty bfq_queues not referred by any process
  io_uring: ensure registered buffer import returns the IO length
  io_uring: Fix getting file for timeout
  block: check bi_size overflow before merge
  io_uring: make timeout sequence == 0 mean no sequence
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2713,6 +2713,28 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
 	}
 }
 
+
+static
+void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{
+	/*
+	 * To prevent bfqq's service guarantees from being violated,
+	 * bfqq may be left busy, i.e., queued for service, even if
+	 * empty (see comments in __bfq_bfqq_expire() for
+	 * details). But, if no process will send requests to bfqq any
+	 * longer, then there is no point in keeping bfqq queued for
+	 * service. In addition, keeping bfqq queued for service, but
+	 * with no process ref any longer, may have caused bfqq to be
+	 * freed when dequeued from service. But this is assumed to
+	 * never happen.
+	 */
+	if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
+	    bfqq != bfqd->in_service_queue)
+		bfq_del_bfqq_busy(bfqd, bfqq, false);
+
+	bfq_put_queue(bfqq);
+}
+
 static void
 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -2783,8 +2805,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	 */
 	new_bfqq->pid = -1;
 	bfqq->bic = NULL;
-	/* release process reference to bfqq */
-	bfq_put_queue(bfqq);
+	bfq_release_process_ref(bfqd, bfqq);
 }
 
 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -4899,7 +4920,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 
 	bfq_put_cooperator(bfqq);
 
-	bfq_put_queue(bfqq); /* release process reference */
+	bfq_release_process_ref(bfqd, bfqq);
 }
 
 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
@@ -5001,8 +5022,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
 
 	bfqq = bic_to_bfqq(bic, false);
 	if (bfqq) {
-		/* release process reference on this queue */
-		bfq_put_queue(bfqq);
+		bfq_release_process_ref(bfqd, bfqq);
 		bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
 		bic_set_bfqq(bic, bfqq, false);
 	}
@@ -5963,7 +5983,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 
 	bfq_put_cooperator(bfqq);
 
-	bfq_put_queue(bfqq);
+	bfq_release_process_ref(bfqq->bfqd, bfqq);
 	return NULL;
 }
 
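Why the helper matters: dropping the last process reference to a bfq_queue that is still queued for service would free the queue while the scheduler still links to it. The sketch below, using hypothetical simplified types rather than the kernel's, shows the ordering the helper enforces: deschedule an empty queue that no process will feed again, then put the reference.

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the relevant bfq_queue state. */
struct queue {
	int  refcount;    /* process + scheduler references */
	bool busy;        /* linked on the scheduler's busy list */
	bool has_reqs;    /* sort_list is non-empty */
	bool in_service;  /* currently the in-service queue */
};

static void put_queue(struct queue *q)
{
	/* Freeing while still linked on a list would be a use-after-free. */
	if (--q->refcount == 0)
		free(q);
}

/* Mirrors the shape of bfq_release_process_ref(): deschedule first,
 * then drop what may be the last reference. */
static void release_process_ref(struct queue *q)
{
	if (q->busy && !q->has_reqs && !q->in_service)
		q->busy = false;	/* unlink from the busy list */
	put_queue(q);
}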
--- a/block/bio.c
+++ b/block/bio.c
@@ -751,7 +751,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return false;
 
-	if (bio->bi_vcnt > 0) {
+	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
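The one-line change works because bio_full() refuses the merge not only when the vector table is full but also when adding len would wrap bi_size, a 32-bit byte count. A sketch of that check, assuming the shape of the 5.4-era helper (the function here is a simplified stand-in, not the kernel's inline):

#include <limits.h>
#include <stdbool.h>

/* Field names follow struct bio, passed as plain parameters for the sketch. */
static bool bio_full(unsigned int bi_vcnt, unsigned int bi_max_vecs,
		     unsigned int bi_size, unsigned int len)
{
	if (bi_vcnt >= bi_max_vecs)
		return true;		/* no room for another bio_vec */
	if (bi_size > UINT_MAX - len)
		return true;		/* bi_size + len would overflow a u32 */
	return false;
}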
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1057,9 +1057,12 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 	atomic64_set(&iocg->active_period, cur_period);
 
 	/* already activated or breaking leaf-only constraint? */
-	for (i = iocg->level; i > 0; i--)
-		if (!list_empty(&iocg->active_list))
-			goto fail_unlock;
+	if (!list_empty(&iocg->active_list))
+		goto succeed_unlock;
+	for (i = iocg->level - 1; i > 0; i--)
+		if (!list_empty(&iocg->ancestors[i]->active_list))
+			goto fail_unlock;
+
 	if (iocg->child_active_sum)
 		goto fail_unlock;
 
@@ -1101,6 +1104,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 		ioc_start_period(ioc, now);
 	}
 
+succeed_unlock:
 	spin_unlock_irq(&ioc->lock);
 	return true;
 
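The bug was that the old loop re-tested the iocg's own active_list on every iteration, so an active ancestor was never noticed and the leaf-only constraint could be violated. A minimal sketch of the corrected walk, with hypothetical simplified types:

#include <stdbool.h>

#define MAX_LEVEL 16

/* Hypothetical stand-in: ancestors[i] points at the ancestor at tree
 * level i; 'active' stands in for !list_empty(&iocg->active_list). */
struct iocg {
	int level;
	bool active;
	struct iocg *ancestors[MAX_LEVEL];
};

/* Activation must fail if any strict ancestor is itself active,
 * because only leaves may sit on the active list. */
static bool ancestor_blocks_activation(const struct iocg *iocg)
{
	for (int i = iocg->level - 1; i > 0; i--)
		if (iocg->ancestors[i]->active)	/* the ancestor's list, not our own */
			return true;
	return false;
}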
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -1000,8 +1000,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 
 	cancel_work_sync(&card->event_work);
 
+	destroy_workqueue(card->event_wq);
 	rsxx_destroy_dev(card);
 	rsxx_dma_destroy(card);
+	destroy_workqueue(card->creg_ctrl.creg_wq);
 
 	spin_lock_irqsave(&card->irq_lock, flags);
 	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
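The fix restores the usual driver rule: a workqueue allocated in probe must be destroyed in remove, after in-flight work is canceled, or the workqueue structure leaks on every unbind. A sketch of the pairing with a hypothetical driver (kernel-style C, not rsxx's actual probe path):

#include <linux/errno.h>
#include <linux/workqueue.h>

struct mydev {
	struct workqueue_struct *wq;
	struct work_struct event_work;
};

static int mydev_probe(struct mydev *dev)
{
	dev->wq = alloc_workqueue("mydev_wq", 0, 0);
	if (!dev->wq)
		return -ENOMEM;
	return 0;
}

static void mydev_remove(struct mydev *dev)
{
	cancel_work_sync(&dev->event_work);	/* wait out in-flight work */
	destroy_workqueue(dev->wq);		/* the step rsxx was missing */
}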
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -326,6 +326,7 @@ struct io_kiocb {
 #define REQ_F_TIMEOUT		1024	/* timeout request */
 #define REQ_F_ISREG		2048	/* regular file */
 #define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
+#define REQ_F_TIMEOUT_NOSEQ	8192	/* no timeout sequence */
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -453,9 +454,13 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
-	if (req && !__io_sequence_defer(ctx, req)) {
-		list_del_init(&req->list);
-		return req;
+	if (req) {
+		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+			return NULL;
+		if (!__io_sequence_defer(ctx, req)) {
+			list_del_init(&req->list);
+			return req;
+		}
 	}
 
 	return NULL;
@@ -1225,7 +1230,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
 		}
 	}
 
-	return 0;
+	return len;
 }
 
 static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
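Returning len instead of 0 matters for linked requests: the link logic compares a request's completion result against the byte count stored at import time, and a stored 0 made every successful fixed-buffer read look short, severing the chain. A userspace sketch of the affected pattern, assuming liburing's helpers and a buffer already registered with io_uring_register_buffers() (fd and iov setup are outside the sketch):

#include <liburing.h>

/* Queue a fixed-buffer read linked to a follow-up request; before the
 * fix, the link was severed even though the read itself succeeded. */
static int queue_linked_fixed_read(struct io_uring *ring, int fd,
				   struct iovec *iov)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	/* buf_index 0 refers to the first registered buffer. */
	io_uring_prep_read_fixed(sqe, fd, iov->iov_base, iov->iov_len, 0, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* next SQE runs only if this succeeds */

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	io_uring_prep_nop(sqe);		/* stand-in for the dependent request */

	return io_uring_submit(ring);
}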
@@ -1941,18 +1946,24 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
 		return -EFAULT;
 
-	req->flags |= REQ_F_TIMEOUT;
-
 	/*
 	 * sqe->off holds how many events that need to occur for this
-	 * timeout event to be satisfied.
+	 * timeout event to be satisfied. If it isn't set, then this is
+	 * a pure timeout request, sequence isn't used.
 	 */
 	count = READ_ONCE(sqe->off);
-	if (!count)
-		count = 1;
+	if (!count) {
+		req->flags |= REQ_F_TIMEOUT_NOSEQ;
+		spin_lock_irq(&ctx->completion_lock);
+		entry = ctx->timeout_list.prev;
+		goto add;
+	}
 
 	req->sequence = ctx->cached_sq_head + count - 1;
 	/* reuse it to store the count */
 	req->submit.sequence = count;
+	req->flags |= REQ_F_TIMEOUT;
 
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
@@ -1964,6 +1975,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		unsigned nxt_sq_head;
 		long long tmp, tmp_nxt;
 
+		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+			continue;
+
 		/*
 		 * Since cached_sq_head + count - 1 can overflow, use type long
 		 * long to store it.
@@ -1990,6 +2004,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		nxt->sequence++;
 	}
 	req->sequence -= span;
+add:
 	list_add(&req->list, entry);
 	spin_unlock_irq(&ctx->completion_lock);
 
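With this change, a timeout SQE whose off field (the completion count) is 0 no longer waits for one completion; it behaves as a pure relative timer. A minimal userspace sketch of the new semantics, assuming liburing's io_uring_prep_timeout() helper and an already-initialized ring (both outside this patch):

#include <liburing.h>

/* A pure time-based wait. Passing count == 0 used to be silently
 * treated as count == 1; after this fix it means "no sequence",
 * i.e. fire on the timer alone. */
static int wait_half_second(struct io_uring *ring)
{
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 500000000 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_timeout(sqe, &ts, 0, 0);	/* count 0: pure timeout */
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret == 0)
		io_uring_cqe_seen(ring, cqe);	/* expired timeout: res == -ETIME */
	return ret;
}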
@@ -2283,6 +2298,7 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
 	switch (op) {
 	case IORING_OP_NOP:
 	case IORING_OP_POLL_REMOVE:
+	case IORING_OP_TIMEOUT:
 		return false;
 	default:
 		return true;