block: rename generic_make_request to submit_bio_noacct

generic_make_request has always been very confusingly misnamed, so rename
it to submit_bio_noacct to make it clear that it is submit_bio minus
accounting and a few checks.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Christoph Hellwig <hch@lst.de>
Date:      2020-07-01 10:59:44 +02:00
Committer: Jens Axboe <axboe@kernel.dk>
parent     c62b37d96b
commit     ed00aabd5e
44 changed files with 115 additions and 118 deletions
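
For orientation, a minimal sketch of the division of labour this rename makes explicit: submit_bio() remains the entry point for filesystems and other upper layers and performs the accounting, while submit_bio_noacct() is what stacking drivers call to re-submit a bio that was already accounted when it first entered the stack. Only the two submit functions come from this patch; the helper below and its remapping details are hypothetical.

/*
 * Hypothetical stacking-driver helper (sketch only): redirect a bio to a
 * lower device and re-submit it.  submit_bio_noacct() is used because the
 * I/O was already accounted when submit_bio() first saw the original bio;
 * calling submit_bio() again here would account it twice.
 */
static void example_remap_and_resubmit(struct bio *bio, struct block_device *lower)
{
        bio_set_dev(bio, lower);
        submit_bio_noacct(bio);
}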

View File

@@ -1036,7 +1036,7 @@ Now the generic block layer performs partition-remapping early and thus
 provides drivers with a sector number relative to whole device, rather than
 having to take partition number into account in order to arrive at the true
 sector number. The routine blk_partition_remap() is invoked by
-generic_make_request even before invoking the queue specific ->submit_bio,
+submit_bio_noacct even before invoking the queue specific ->submit_bio,
 so the i/o scheduler also gets to operate on whole disk sector numbers. This
 should typically not require changes to block drivers, it just never gets
 to invoke its own partition sector offset calculations since all bios

View File

@@ -24,7 +24,7 @@ Available fault injection capabilities
   injects disk IO errors on devices permitted by setting
   /sys/block/<device>/make-it-fail or
-  /sys/block/<device>/<partition>/make-it-fail. (generic_make_request())
+  /sys/block/<device>/<partition>/make-it-fail. (submit_bio_noacct())

 - fail_mmc_request

View File

@@ -1453,7 +1453,7 @@ function-trace, we get a much larger output::
 => __blk_run_queue_uncond
 => __blk_run_queue
 => blk_queue_bio
-=> generic_make_request
+=> submit_bio_noacct
 => submit_bio
 => submit_bh
 => __ext3_get_inode_loc
@@ -1738,7 +1738,7 @@ tracers.
 => __blk_run_queue_uncond
 => __blk_run_queue
 => blk_queue_bio
-=> generic_make_request
+=> submit_bio_noacct
 => submit_bio
 => submit_bh
 => ext3_bread

View File

@@ -358,7 +358,7 @@ static void bio_alloc_rescue(struct work_struct *work)
                 if (!bio)
                         break;

-                generic_make_request(bio);
+                submit_bio_noacct(bio);
         }
 }

@@ -416,19 +416,19 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
  * submit the previously allocated bio for IO before attempting to allocate
  * a new one. Failure to do so can cause deadlocks under memory pressure.
  *
- * Note that when running under generic_make_request() (i.e. any block
+ * Note that when running under submit_bio_noacct() (i.e. any block
  * driver), bios are not submitted until after you return - see the code in
- * generic_make_request() that converts recursion into iteration, to prevent
+ * submit_bio_noacct() that converts recursion into iteration, to prevent
  * stack overflows.
  *
  * This would normally mean allocating multiple bios under
- * generic_make_request() would be susceptible to deadlocks, but we have
+ * submit_bio_noacct() would be susceptible to deadlocks, but we have
  * deadlock avoidance code that resubmits any blocked bios from a rescuer
  * thread.
  *
  * However, we do not guarantee forward progress for allocations from other
  * mempools. Doing multiple allocations from the same mempool under
- * generic_make_request() should be avoided - instead, use bio_set's front_pad
+ * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
  * for per bio allocations.
  *
  * RETURNS:
@@ -457,14 +457,14 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
                     nr_iovecs > 0))
                 return NULL;

         /*
-         * generic_make_request() converts recursion to iteration; this
+         * submit_bio_noacct() converts recursion to iteration; this
          * means if we're running beneath it, any bios we allocate and
          * submit will not be submitted (and thus freed) until after we
          * return.
          *
          * This exposes us to a potential deadlock if we allocate
          * multiple bios from the same bio_set() while running
-         * underneath generic_make_request(). If we were to allocate
+         * underneath submit_bio_noacct(). If we were to allocate
          * multiple bios (say a stacking block driver that was splitting
          * bios), we would deadlock if we exhausted the mempool's
          * reserve.
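
The front_pad advice in the comment above usually takes the following shape. A hedged sketch (the struct and helper are hypothetical; bioset_init() and bio_alloc_bioset() are the real APIs): per-bio driver state is embedded in front of the bio, so a single mempool allocation covers both and no second allocation happens under submit_bio_noacct().

/* Hypothetical per-bio driver state carried in the bio_set's front_pad. */
struct my_io {
        struct work_struct work;        /* driver-private fields come first */
        struct bio bio;                 /* must be last: the bio_set hands this out */
};

/*
 * bioset_init(&bs, pool_size, offsetof(struct my_io, bio), flags) reserves
 * the pad, so recovering the container is plain pointer arithmetic.
 */
static struct my_io *alloc_my_io(struct bio_set *bs)
{
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);

        return bio ? container_of(bio, struct my_io, bio) : NULL;
}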

View File

@@ -956,8 +956,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
         return BLK_STS_OK;
 }

-static noinline_for_stack bool
-generic_make_request_checks(struct bio *bio)
+static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 {
         struct request_queue *q = bio->bi_disk->queue;
         blk_status_t status = BLK_STS_IOERR;
@@ -985,9 +984,8 @@ generic_make_request_checks(struct bio *bio)
         }

         /*
-         * Filter flush bio's early so that make_request based
-         * drivers without flush support don't have to worry
-         * about them.
+         * Filter flush bio's early so that bio based drivers without flush
+         * support don't have to worry about them.
          */
         if (op_is_flush(bio->bi_opf) &&
             !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
@@ -1072,7 +1070,7 @@ end_io:
         return false;
 }

-static blk_qc_t do_make_request(struct bio *bio)
+static blk_qc_t __submit_bio(struct bio *bio)
 {
         struct gendisk *disk = bio->bi_disk;
         blk_qc_t ret = BLK_QC_T_NONE;
@@ -1087,7 +1085,7 @@ static blk_qc_t do_make_request(struct bio *bio)
 }

 /**
- * generic_make_request - re-submit a bio to the block device layer for I/O
+ * submit_bio_noacct - re-submit a bio to the block device layer for I/O
  * @bio: The bio describing the location in memory and on the device.
  *
  * This is a version of submit_bio() that shall only be used for I/O that is
@@ -1095,7 +1093,7 @@ static blk_qc_t do_make_request(struct bio *bio)
  * systems and other upper level users of the block layer should use
  * submit_bio() instead.
  */
-blk_qc_t generic_make_request(struct bio *bio)
+blk_qc_t submit_bio_noacct(struct bio *bio)
 {
         /*
          * bio_list_on_stack[0] contains bios submitted by the current
@@ -1106,7 +1104,7 @@ blk_qc_t generic_make_request(struct bio *bio)
         struct bio_list bio_list_on_stack[2];
         blk_qc_t ret = BLK_QC_T_NONE;

-        if (!generic_make_request_checks(bio))
+        if (!submit_bio_checks(bio))
                 goto out;

         /*
@@ -1114,7 +1112,7 @@ blk_qc_t generic_make_request(struct bio *bio)
          * stack usage with stacked devices could be a problem. So use
          * current->bio_list to keep a list of requests submited by a
          * ->submit_bio method. current->bio_list is also used as a
-         * flag to say if generic_make_request is currently active in this
+         * flag to say if submit_bio_noacct is currently active in this
          * task or not. If it is NULL, then no make_request is active. If
          * it is non-NULL, then a make_request is active, and new requests
          * should be added at the tail
@@ -1132,7 +1130,7 @@ blk_qc_t generic_make_request(struct bio *bio)
          * we assign bio_list to a pointer to the bio_list_on_stack,
          * thus initialising the bio_list of new bios to be
          * added. ->submit_bio() may indeed add some more bios
-         * through a recursive call to generic_make_request. If it
+         * through a recursive call to submit_bio_noacct. If it
          * did, we find a non-NULL value in bio_list and re-enter the loop
          * from the top. In this case we really did just take the bio
          * of the top of the list (no pretending) and so remove it from
@@ -1150,7 +1148,7 @@ blk_qc_t generic_make_request(struct bio *bio)
                 /* Create a fresh bio_list for all subordinate requests */
                 bio_list_on_stack[1] = bio_list_on_stack[0];
                 bio_list_init(&bio_list_on_stack[0]);
-                ret = do_make_request(bio);
+                ret = __submit_bio(bio);

                 /* sort new bios into those for a lower level
                  * and those for the same level
@@ -1174,13 +1172,13 @@ blk_qc_t generic_make_request(struct bio *bio)
 out:
         return ret;
 }
-EXPORT_SYMBOL(generic_make_request);
+EXPORT_SYMBOL(submit_bio_noacct);

 /**
  * direct_make_request - hand a buffer directly to its device driver for I/O
  * @bio: The bio describing the location in memory and on the device.
  *
- * This function behaves like generic_make_request(), but does not protect
+ * This function behaves like submit_bio_noacct(), but does not protect
  * against recursion. Must only be used if the called driver is known
  * to be blk-mq based.
  */
@@ -1192,7 +1190,7 @@ blk_qc_t direct_make_request(struct bio *bio)
                 bio_io_error(bio);
                 return BLK_QC_T_NONE;
         }
-        if (!generic_make_request_checks(bio))
+        if (!submit_bio_checks(bio))
                 return BLK_QC_T_NONE;
         if (unlikely(bio_queue_enter(bio)))
                 return BLK_QC_T_NONE;
@@ -1263,13 +1261,13 @@ blk_qc_t submit_bio(struct bio *bio)
                 blk_qc_t ret;

                 psi_memstall_enter(&pflags);
-                ret = generic_make_request(bio);
+                ret = submit_bio_noacct(bio);
                 psi_memstall_leave(&pflags);

                 return ret;
         }

-        return generic_make_request(bio);
+        return submit_bio_noacct(bio);
 }
 EXPORT_SYMBOL(submit_bio);
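
For readers following the comments above, a compressed sketch of the recursion-to-iteration scheme submit_bio_noacct() implements. This is an illustration, not the kernel code: error handling, the blk_qc_t plumbing, and the two-level sorting of bio_list_on_stack are all omitted.

/*
 * Sketch: when a ->submit_bio method re-submits bios while an iteration is
 * already active, they are appended to current->bio_list instead of
 * recursing, and the outermost caller drains them.
 */
blk_qc_t submit_bio_noacct_sketch(struct bio *bio)
{
        struct bio_list list;

        if (current->bio_list) {
                /* Nested call: queue the bio for the outer loop. */
                bio_list_add(&current->bio_list[0], bio);
                return BLK_QC_T_NONE;
        }

        bio_list_init(&list);
        current->bio_list = &list;
        do {
                __submit_bio(bio);      /* may add more bios to the list */
        } while ((bio = bio_list_pop(&list)));
        current->bio_list = NULL;

        return BLK_QC_T_NONE;
}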

View File

@@ -228,7 +228,7 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
                         return false;
                 }
                 bio_chain(split_bio, bio);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 *bio_ptr = split_bio;
         }

View File

@@ -239,7 +239,7 @@ void __blk_crypto_free_request(struct request *rq)
  * kernel crypto API. When the crypto API fallback is used for encryption,
  * blk-crypto may choose to split the bio into 2 - the first one that will
  * continue to be processed and the second one that will be resubmitted via
- * generic_make_request. A bounce bio will be allocated to encrypt the contents
+ * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
  * of the aforementioned "first one", and *bio_ptr will be updated to this
  * bounce bio.
  *

View File

@@ -338,7 +338,7 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
                 bio_chain(split, *bio);
                 trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
-                generic_make_request(*bio);
+                submit_bio_noacct(*bio);
                 *bio = split;
         }
 }

View File

@@ -1339,8 +1339,8 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
         if (!bio_list_empty(&bio_list_on_stack)) {
                 blk_start_plug(&plug);
-                while((bio = bio_list_pop(&bio_list_on_stack)))
-                        generic_make_request(bio);
+                while ((bio = bio_list_pop(&bio_list_on_stack)))
+                        submit_bio_noacct(bio);
                 blk_finish_plug(&plug);
         }
 }

View File

@@ -309,7 +309,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
         if (!passthrough && sectors < bio_sectors(*bio_orig)) {
                 bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
                 bio_chain(bio, *bio_orig);
-                generic_make_request(*bio_orig);
+                submit_bio_noacct(*bio_orig);
                 *bio_orig = bio;
         }
         bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :

View File

@@ -1576,12 +1576,12 @@ void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
 /*
  * used to submit our private bio
  */
-static inline void drbd_generic_make_request(struct drbd_device *device,
+static inline void drbd_submit_bio_noacct(struct drbd_device *device,
                                              int fault_type, struct bio *bio)
 {
         __release(local);
         if (!bio->bi_disk) {
-                drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
+                drbd_err(device, "drbd_submit_bio_noacct: bio->bi_disk == NULL\n");
                 bio->bi_status = BLK_STS_IOERR;
                 bio_endio(bio);
                 return;
@@ -1590,7 +1590,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
         if (drbd_insert_fault(device, fault_type))
                 bio_io_error(bio);
         else
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
 }

 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,

View File

@@ -2325,7 +2325,7 @@ static void do_retry(struct work_struct *ws)
                  * workqueues instead.
                  */

-                /* We are not just doing generic_make_request(),
+                /* We are not just doing submit_bio_noacct(),
                  * as we want to keep the start_time information. */
                 inc_ap_bio(device);
                 __drbd_make_request(device, bio, start_jif);

View File

@@ -1723,7 +1723,7 @@ next_bio:
                 bios = bios->bi_next;
                 bio->bi_next = NULL;

-                drbd_generic_make_request(device, fault_type, bio);
+                drbd_submit_bio_noacct(device, fault_type, bio);
         } while (bios);
         return 0;

View File

@@ -1164,7 +1164,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
                 else if (bio_op(bio) == REQ_OP_DISCARD)
                         drbd_process_discard_or_zeroes_req(req, EE_TRIM);
                 else
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 put_ldev(device);
         } else
                 bio_io_error(bio);

View File

@@ -1525,7 +1525,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
         drbd_req_make_private_bio(req, req->master_bio);
         bio_set_dev(req->private_bio, device->ldev->backing_bdev);
-        generic_make_request(req->private_bio);
+        submit_bio_noacct(req->private_bio);

         return 0;
 }

View File

@@ -913,7 +913,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                 }

                 atomic_inc(&pd->cdrw.pending_bios);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
         }
 }

View File

@@ -320,7 +320,7 @@ split_retry:
         split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
                               &pblk_bio_set);
         bio_chain(split_bio, bio);
-        generic_make_request(bio);
+        submit_bio_noacct(bio);

         /* New bio contains first N sectors of the previous one, so
          * we can continue to use existing rqd, but we need to shrink

View File

@@ -929,7 +929,7 @@ static inline void closure_bio_submit(struct cache_set *c,
                 bio_endio(bio);
                 return;
         }
-        generic_make_request(bio);
+        submit_bio_noacct(bio);
 }

 /*

View File

@@ -959,7 +959,7 @@ err:
  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
- * If IO is necessary and running under generic_make_request, returns -EAGAIN.
+ * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
  *
  * The btree node will have either a read or a write lock held, depending on
  * level and op->lock.

View File

@@ -1115,7 +1115,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
             !blk_queue_discard(bdev_get_queue(dc->bdev)))
                 bio->bi_end_io(bio);
         else
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
 }

 static void quit_max_writeback_rate(struct cache_set *c,
@@ -1197,7 +1197,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
                 if (!bio->bi_iter.bi_size) {
                         /*
                          * can't call bch_journal_meta from under
-                         * generic_make_request
+                         * submit_bio_noacct
                          */
                         continue_at_nobarrier(&s->cl,
                                               cached_dev_nodata,
@@ -1311,8 +1311,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
         if (!bio->bi_iter.bi_size) {
                 /*
-                 * can't call bch_journal_meta from under
-                 * generic_make_request
+                 * can't call bch_journal_meta from under submit_bio_noacct
                  */
                 continue_at_nobarrier(&s->cl,
                                       flash_dev_nodata,

View File

@@ -886,7 +886,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 static void accounted_request(struct cache *cache, struct bio *bio)
 {
         accounted_begin(cache, bio);
-        generic_make_request(bio);
+        submit_bio_noacct(bio);
 }

 static void issue_op(struct bio *bio, void *context)
@@ -1792,7 +1792,7 @@ static bool process_bio(struct cache *cache, struct bio *bio)
         bool commit_needed;

         if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
-                generic_make_request(bio);
+                submit_bio_noacct(bio);

         return commit_needed;
 }
@@ -1858,7 +1858,7 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
         if (cache->features.discard_passdown) {
                 remap_to_origin(cache, bio);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
         } else
                 bio_endio(bio);

View File

@@ -330,7 +330,7 @@ static void submit_bios(struct bio_list *bios)
         blk_start_plug(&plug);

         while ((bio = bio_list_pop(bios)))
-                generic_make_request(bio);
+                submit_bio_noacct(bio);

         blk_finish_plug(&plug);
 }
@@ -346,7 +346,7 @@ static void submit_bios(struct bio_list *bios)
 static void issue_bio(struct clone *clone, struct bio *bio)
 {
         if (!bio_triggers_commit(clone, bio)) {
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 return;
         }
@@ -473,7 +473,7 @@ static void complete_discard_bio(struct clone *clone, struct bio *bio, bool succ
                 bio_region_range(clone, bio, &rs, &nr_regions);
                 trim_bio(bio, region_to_sector(clone, rs),
                          nr_regions << clone->region_shift);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
         } else
                 bio_endio(bio);
 }
@@ -865,7 +865,7 @@ static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio
         bio->bi_private = hd;

         atomic_inc(&hd->clone->hydrations_in_flight);
-        generic_make_request(bio);
+        submit_bio_noacct(bio);
 }

 /*
@@ -1281,7 +1281,7 @@ static void process_deferred_flush_bios(struct clone *clone)
                          */
                         bio_endio(bio);
                 } else {
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 }
         }
 }

View File

@@ -1789,7 +1789,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
                 return 1;
         }

-        generic_make_request(clone);
+        submit_bio_noacct(clone);
         return 0;
 }
@@ -1815,7 +1815,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
 {
         struct bio *clone = io->ctx.bio_out;

-        generic_make_request(clone);
+        submit_bio_noacct(clone);
 }

 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
@@ -1893,7 +1893,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
         clone->bi_iter.bi_sector = cc->start + io->sector;

         if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
-                generic_make_request(clone);
+                submit_bio_noacct(clone);
                 return;
         }

View File

@@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio)
         while (bio) {
                 n = bio->bi_next;
                 bio->bi_next = NULL;
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 bio = n;
         }
 }

View File

@@ -1265,7 +1265,7 @@ static void process_deferred_bios(struct era *era)
                         bio_io_error(bio);
         else
                 while ((bio = bio_list_pop(&marked_bios)))
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
 }

 static void process_rpc_calls(struct era *era)

View File

@@ -2115,12 +2115,12 @@ offload_to_thread:
                 dio->in_flight = (atomic_t)ATOMIC_INIT(1);
                 dio->completion = NULL;

-                generic_make_request(bio);
+                submit_bio_noacct(bio);

                 return;
         }

-        generic_make_request(bio);
+        submit_bio_noacct(bio);

         if (need_sync_io) {
                 wait_for_completion_io(&read_comp);

View File

@@ -677,7 +677,7 @@ static void process_queued_bios(struct work_struct *work)
                         bio_endio(bio);
                         break;
                 case DM_MAPIO_REMAPPED:
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                         break;
                 case DM_MAPIO_SUBMITTED:
                         break;

View File

@@ -779,7 +779,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
                         wakeup_mirrord(ms);
                 } else {
                         map_bio(get_default_mirror(ms), bio);
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 }
         }
 }

View File

@@ -252,7 +252,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,

         /*
          * Issue the synchronous I/O from a different thread
-         * to avoid generic_make_request recursion.
+         * to avoid submit_bio_noacct recursion.
          */
         INIT_WORK_ONSTACK(&req.work, do_metadata);
         queue_work(ps->metadata_wq, &req.work);

View File

@@ -1568,7 +1568,7 @@ static void flush_bios(struct bio *bio)
         while (bio) {
                 n = bio->bi_next;
                 bio->bi_next = NULL;
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 bio = n;
         }
 }
@@ -1588,7 +1588,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
                 bio->bi_next = NULL;
                 r = do_origin(s->origin, bio, false);
                 if (r == DM_MAPIO_REMAPPED)
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 bio = n;
         }
 }
@@ -1829,7 +1829,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
         bio->bi_end_io = full_bio_end_io;
         bio->bi_private = callback_data;

-        generic_make_request(bio);
+        submit_bio_noacct(bio);
 }

 static struct dm_snap_pending_exception *

View File

@@ -758,7 +758,7 @@ static void issue(struct thin_c *tc, struct bio *bio)
         struct pool *pool = tc->pool;

         if (!bio_triggers_commit(tc, bio)) {
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 return;
         }
@@ -2394,7 +2394,7 @@ static void process_deferred_bios(struct pool *pool)
                 if (bio->bi_opf & REQ_PREFLUSH)
                         bio_endio(bio);
                 else
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
         }
 }

View File

@@ -681,7 +681,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)

         verity_submit_prefetch(v, io);

-        generic_make_request(bio);
+        submit_bio_noacct(bio);

         return DM_MAPIO_SUBMITTED;
 }

View File

@@ -1238,7 +1238,7 @@ static int writecache_flush_thread(void *data)
                                            bio_end_sector(bio));
                         wc_unlock(wc);
                         bio_set_dev(bio, wc->dev->bdev);
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 } else {
                         writecache_flush(wc);
                         wc_unlock(wc);

View File

@@ -140,7 +140,7 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
         bio_advance(bio, clone->bi_iter.bi_size);

         refcount_inc(&bioctx->ref);
-        generic_make_request(clone);
+        submit_bio_noacct(clone);

         if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
                 zone->wp_block += nr_blocks;

View File

@@ -1305,7 +1305,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
                 if (md->type == DM_TYPE_NVME_BIO_BASED)
                         ret = direct_make_request(clone);
                 else
-                        ret = generic_make_request(clone);
+                        ret = submit_bio_noacct(clone);
                 break;
         case DM_MAPIO_KILL:
                 free_tio(tio);
@@ -1652,7 +1652,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                 error = __split_and_process_non_flush(&ci);
                 if (current->bio_list && ci.sector_count && !error) {
                         /*
-                         * Remainder must be passed to generic_make_request()
+                         * Remainder must be passed to submit_bio_noacct()
                          * so that it gets handled *after* bios already submitted
                          * have been completely processed.
                          * We take a clone of the original to store in
@@ -1677,7 +1677,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                         bio_chain(b, bio);
                         trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
-                        ret = generic_make_request(bio);
+                        ret = submit_bio_noacct(bio);
                         break;
                 }
         }
@@ -1745,7 +1745,7 @@ static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struc
                 bio_chain(split, *bio);
                 trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
-                generic_make_request(*bio);
+                submit_bio_noacct(*bio);
                 *bio = split;
         }
 }
@@ -2500,7 +2500,7 @@ static void dm_wq_work(struct work_struct *work)
                         break;

                 if (dm_request_based(md))
-                        (void) generic_make_request(c);
+                        (void) submit_bio_noacct(c);
                 else
                         (void) dm_process_bio(md, map, c);
         }

View File

@@ -169,7 +169,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
         if (bio_data_dir(bio) == WRITE) {
                 /* write request */
                 if (atomic_read(&conf->counters[WriteAll])) {
-                        /* special case - don't decrement, don't generic_make_request,
+                        /* special case - don't decrement, don't submit_bio_noacct,
                          * just fail immediately
                          */
                         bio_io_error(bio);
@@ -214,7 +214,7 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
         } else
                 bio_set_dev(bio, conf->rdev->bdev);

-        generic_make_request(bio);
+        submit_bio_noacct(bio);
         return true;
 }

View File

@@ -267,7 +267,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
                 struct bio *split = bio_split(bio, end_sector - bio_sector,
                                               GFP_NOIO, &mddev->bio_set);
                 bio_chain(split, bio);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 bio = split;
         }
@@ -286,7 +286,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
                                       bio_sector);
                 mddev_check_writesame(mddev, bio);
                 mddev_check_write_zeroes(mddev, bio);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
         }
         return true;

View File

@@ -131,7 +131,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
         mp_bh->bio.bi_private = mp_bh;
         mddev_check_writesame(mddev, &mp_bh->bio);
         mddev_check_write_zeroes(mddev, &mp_bh->bio);
-        generic_make_request(&mp_bh->bio);
+        submit_bio_noacct(&mp_bh->bio);
         return true;
 }
@@ -348,7 +348,7 @@ static void multipathd(struct md_thread *thread)
                         bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
                         bio->bi_end_io = multipath_end_request;
                         bio->bi_private = mp_bh;
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 }
         }
         spin_unlock_irqrestore(&conf->device_lock, flags);

View File

@@ -495,7 +495,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
                                 zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
                                 &mddev->bio_set);
                         bio_chain(split, bio);
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                         bio = split;
                         end = zone->zone_end;
                 } else
@@ -559,7 +559,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
                         trace_block_bio_remap(bdev_get_queue(rdev->bdev),
                                 discard_bio, disk_devt(mddev->gendisk),
                                 bio->bi_iter.bi_sector);
-                generic_make_request(discard_bio);
+                submit_bio_noacct(discard_bio);
         }
         bio_endio(bio);
 }
@@ -600,7 +600,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
                 struct bio *split = bio_split(bio, sectors, GFP_NOIO,
                                               &mddev->bio_set);
                 bio_chain(split, bio);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 bio = split;
         }
@@ -633,7 +633,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
                                 disk_devt(mddev->gendisk), bio_sector);
         mddev_check_writesame(mddev, bio);
         mddev_check_write_zeroes(mddev, bio);
-        generic_make_request(bio);
+        submit_bio_noacct(bio);
         return true;
 }

View File

@@ -834,7 +834,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
                         /* Just ignore it */
                         bio_endio(bio);
                 else
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 bio = next;
                 cond_resched();
         }
@@ -1312,7 +1312,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
                 struct bio *split = bio_split(bio, max_sectors,
                                               gfp, &conf->bio_split);
                 bio_chain(split, bio);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 bio = split;
                 r1_bio->master_bio = bio;
                 r1_bio->sectors = max_sectors;
@@ -1338,7 +1338,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
                 trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
                                       disk_devt(mddev->gendisk), r1_bio->sector);

-        generic_make_request(read_bio);
+        submit_bio_noacct(read_bio);
 }

 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1483,7 +1483,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                 struct bio *split = bio_split(bio, max_sectors,
                                               GFP_NOIO, &conf->bio_split);
                 bio_chain(split, bio);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 bio = split;
                 r1_bio->master_bio = bio;
                 r1_bio->sectors = max_sectors;
@@ -2240,7 +2240,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
                 atomic_inc(&r1_bio->remaining);
                 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));

-                generic_make_request(wbio);
+                submit_bio_noacct(wbio);
         }

         put_sync_write_buf(r1_bio, 1);
@@ -2926,7 +2926,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                                 md_sync_acct_bio(bio, nr_sectors);
                                 if (read_targets == 1)
                                         bio->bi_opf &= ~MD_FAILFAST;
-                                generic_make_request(bio);
+                                submit_bio_noacct(bio);
                         }
                 }
         } else {
@@ -2935,7 +2935,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                 md_sync_acct_bio(bio, nr_sectors);
                 if (read_targets == 1)
                         bio->bi_opf &= ~MD_FAILFAST;
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
         }
         return nr_sectors;
 }

View File

@@ -917,7 +917,7 @@ static void flush_pending_writes(struct r10conf *conf)
                                 /* Just ignore it */
                                 bio_endio(bio);
                         else
-                                generic_make_request(bio);
+                                submit_bio_noacct(bio);
                         bio = next;
                 }
                 blk_finish_plug(&plug);
@@ -1102,7 +1102,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
                         /* Just ignore it */
                         bio_endio(bio);
                 else
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 bio = next;
         }
         kfree(plug);
@@ -1194,7 +1194,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                                               gfp, &conf->bio_split);
                 bio_chain(split, bio);
                 allow_barrier(conf);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 wait_barrier(conf);
                 bio = split;
                 r10_bio->master_bio = bio;
@@ -1221,7 +1221,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
                 trace_block_bio_remap(read_bio->bi_disk->queue,
                                       read_bio, disk_devt(mddev->gendisk),
                                       r10_bio->sector);
-        generic_make_request(read_bio);
+        submit_bio_noacct(read_bio);
         return;
 }
@@ -1479,7 +1479,7 @@ retry_write:
                                               GFP_NOIO, &conf->bio_split);
                 bio_chain(split, bio);
                 allow_barrier(conf);
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
                 wait_barrier(conf);
                 bio = split;
                 r10_bio->master_bio = bio;
@@ -2099,7 +2099,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                         tbio->bi_opf |= MD_FAILFAST;
                 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
                 bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
-                generic_make_request(tbio);
+                submit_bio_noacct(tbio);
         }

         /* Now write out to any replacement devices
@@ -2118,7 +2118,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                 atomic_inc(&r10_bio->remaining);
                 md_sync_acct(conf->mirrors[d].replacement->bdev,
                              bio_sectors(tbio));
-                generic_make_request(tbio);
+                submit_bio_noacct(tbio);
         }

 done:
@@ -2241,7 +2241,7 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
         wbio = r10_bio->devs[1].bio;
         wbio2 = r10_bio->devs[1].repl_bio;
         /* Need to test wbio2->bi_end_io before we call
-         * generic_make_request as if the former is NULL,
+         * submit_bio_noacct as if the former is NULL,
          * the latter is free to free wbio2.
          */
         if (wbio2 && !wbio2->bi_end_io)
@@ -2249,13 +2249,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
         if (wbio->bi_end_io) {
                 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
                 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
-                generic_make_request(wbio);
+                submit_bio_noacct(wbio);
         }
         if (wbio2) {
                 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
                 md_sync_acct(conf->mirrors[d].replacement->bdev,
                              bio_sectors(wbio2));
-                generic_make_request(wbio2);
+                submit_bio_noacct(wbio2);
         }
 }
@@ -2889,7 +2889,7 @@ static void raid10_set_cluster_sync_high(struct r10conf *conf)
  * a number of r10_bio structures, one for each out-of-sync device.
  * As we setup these structures, we collect all bio's together into a list
  * which we then process collectively to add pages, and then process again
- * to pass to generic_make_request.
+ * to pass to submit_bio_noacct.
  *
  * The r10_bio structures are linked using a borrowed master_bio pointer.
  * This link is counted in ->remaining. When the r10_bio that points to NULL
@@ -3496,7 +3496,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                 if (bio->bi_end_io == end_sync_read) {
                         md_sync_acct_bio(bio, nr_sectors);
                         bio->bi_status = 0;
-                        generic_make_request(bio);
+                        submit_bio_noacct(bio);
                 }
         }
@@ -4654,7 +4654,7 @@ read_more:
         md_sync_acct_bio(read_bio, r10_bio->sectors);
         atomic_inc(&r10_bio->remaining);
         read_bio->bi_next = NULL;
-        generic_make_request(read_bio);
+        submit_bio_noacct(read_bio);
         sectors_done += nr_sectors;
         if (sector_nr <= last)
                 goto read_more;
@@ -4717,7 +4717,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                         md_sync_acct_bio(b, r10_bio->sectors);
                         atomic_inc(&r10_bio->remaining);
                         b->bi_next = NULL;
-                        generic_make_request(b);
+                        submit_bio_noacct(b);
                 }
                 end_reshape_request(r10_bio);
         }

View File

@@ -873,7 +873,7 @@ static void dispatch_bio_list(struct bio_list *tmp)
         struct bio *bio;

         while ((bio = bio_list_pop(tmp)))
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
 }

 static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b)
@@ -1151,7 +1151,7 @@ again:
                         if (should_defer && op_is_write(op))
                                 bio_list_add(&pending_bios, bi);
                         else
-                                generic_make_request(bi);
+                                submit_bio_noacct(bi);
                 }
                 if (rrdev) {
                         if (s->syncing || s->expanding || s->expanded
@@ -1201,7 +1201,7 @@ again:
                         if (should_defer && op_is_write(op))
                                 bio_list_add(&pending_bios, rbi);
                         else
-                                generic_make_request(rbi);
+                                submit_bio_noacct(rbi);
                 }
                 if (!rdev && !rrdev) {
                         if (op_is_write(op))
@@ -5289,7 +5289,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
                         trace_block_bio_remap(align_bi->bi_disk->queue,
                                               align_bi, disk_devt(mddev->gendisk),
                                               raid_bio->bi_iter.bi_sector);
-                generic_make_request(align_bi);
+                submit_bio_noacct(align_bi);
                 return 1;
         } else {
                 rcu_read_unlock();
@@ -5309,7 +5309,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
                 struct r5conf *conf = mddev->private;
                 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
                 bio_chain(split, raid_bio);
-                generic_make_request(raid_bio);
+                submit_bio_noacct(raid_bio);
                 raid_bio = split;
         }

View File

@@ -351,7 +351,7 @@ static void nvme_requeue_work(struct work_struct *work)
                  * path.
                  */
                 bio->bi_disk = head->disk;
-                generic_make_request(bio);
+                submit_bio_noacct(bio);
         }
 }

View File

@@ -852,7 +852,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)

 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
-extern blk_qc_t generic_make_request(struct bio *bio);
+blk_qc_t submit_bio_noacct(struct bio *bio);
 extern blk_qc_t direct_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);