Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Final block fixes for 3.16.  Four small fixes that should go into
  3.16, have been queued up for a bit and delayed due to vacation and
  other euro duties.  But here they are.  The pull request contains:

   - Fix for a reported crash with shared tagging on SCSI from
     Christoph.

   - A regression fix for drbd.  From Lars Ellenberg.

   - Hooking up the compat ioctl for BLKZEROOUT, which requires no
     translation.  From Mikulas.

   - A fix for a regression where we would crash on queue exit if the
     root_blkg is gone/not there.  From Tejun"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: provide compat ioctl for BLKZEROOUT
  blkcg: don't call into policy draining if root_blkg is already gone
  drbd: fix regression 'out of mem, failed to invoke fence-peer helper'
  block: don't assume last put of shared tags is for the host
commit 6890ad4b38
block/blk-cgroup.c
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
 
+	/*
+	 * @q could be exiting and already have destroyed all blkgs as
+	 * indicated by NULL root_blkg.  If so, don't confuse policies.
+	 */
+	if (!q->root_blkg)
+		return;
+
 	blk_throtl_drain(q);
 }
 
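The blkcg fix is a teardown guard: if the queue is already exiting and has destroyed all blkgs (root_blkg is NULL), draining must not call into the policies. Below is a minimal user-space sketch of the same guard pattern; the names (struct queue, root_group, drain_policies) are hypothetical stand-ins, not the kernel API.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures; illustration only. */
struct group { int pending; };
struct queue { struct group *root_group; };

static void drain_policies(struct queue *q)
{
	/* Would walk per-policy state hanging off the groups. */
	printf("draining, pending=%d\n", q->root_group->pending);
}

static void drain_queue(struct queue *q)
{
	/*
	 * Same shape as the fix: the queue may already be exiting and have
	 * destroyed all groups, indicated by a NULL root_group.  Return
	 * early instead of handing freed per-group state to the policies.
	 */
	if (!q->root_group)
		return;

	drain_policies(q);
}

int main(void)
{
	struct group g = { .pending = 3 };
	struct queue live = { .root_group = &g };
	struct queue exiting = { .root_group = NULL };

	drain_queue(&live);	/* drains normally */
	drain_queue(&exiting);	/* early return, no crash */
	return 0;
}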
block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:	the tag map to free
  *
- * Tries to free the specified @bqt.  Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and frees it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-	int retval;
-
-	retval = atomic_dec_and_test(&bqt->refcnt);
-	if (retval) {
+	if (atomic_dec_and_test(&bqt->refcnt)) {
 		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
 							bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
 		kfree(bqt);
 	}
-
-	return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
 	if (!bqt)
 		return;
 
-	__blk_free_tags(bqt);
+	blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:	the tag map to free
- *
- * For externally managed @bqt frees the map.  Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-	if (unlikely(!__blk_free_tags(bqt)))
-		BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
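The blk-tag change replaces the old split between __blk_free_tags() (which reported whether it actually freed the map) and blk_free_tags() (which BUG()ed if it was not the last reference) with a single drop-the-reference-and-free-on-last-put helper, so the host is no longer assumed to hold the final reference to a shared tag map. A rough user-space sketch of that pattern follows; the names (tag_map, tag_map_put) are made up, and C11 atomics stand in for the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct blk_queue_tag; illustration only. */
struct tag_map {
	atomic_int refcnt;
	/* ... tag bitmap, request pointers ... */
};

static struct tag_map *tag_map_create(int users)
{
	struct tag_map *t = malloc(sizeof(*t));
	atomic_init(&t->refcnt, users);
	return t;
}

/*
 * Same shape as the reworked blk_free_tags(): every holder simply drops
 * its reference, and whoever drops the last one frees the map.  No holder
 * has to know whether it is the host or a queue.
 */
static void tag_map_put(struct tag_map *t)
{
	if (atomic_fetch_sub(&t->refcnt, 1) == 1) {
		printf("last put, freeing\n");
		free(t);
	}
}

int main(void)
{
	/* One host plus two queues sharing the map. */
	struct tag_map *t = tag_map_create(3);

	tag_map_put(t);	/* queue 0 */
	tag_map_put(t);	/* host: no longer assumed to be the last put */
	tag_map_put(t);	/* queue 1 drops the final reference and frees */
	return 0;
}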
block/compat_ioctl.c
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKROSET:
 	case BLKDISCARD:
 	case BLKSECDISCARD:
+	case BLKZEROOUT:
 	/*
 	 * the ones below are implemented in blkdev_locked_ioctl,
 	 * but we call blkdev_ioctl, which gets the lock for us
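BLKZEROOUT takes a pair of 64-bit values (byte offset and byte length), so the argument layout is identical for 32-bit and 64-bit callers and the compat entry can simply forward to blkdev_ioctl() without translation. A small sketch of how a caller might issue it, assuming a disposable test device at the placeholder path /dev/sdX:

#include <fcntl.h>
#include <linux/fs.h>	/* BLKZEROOUT */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder device path; use a disposable test device. */
	int fd = open("/dev/sdX", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * BLKZEROOUT takes two __u64 values: byte offset and byte length,
	 * both 512-byte aligned.  The layout is the same for 32-bit and
	 * 64-bit callers, which is why no compat translation is needed.
	 */
	uint64_t range[2] = { 0, 1 << 20 };	/* zero the first 1 MiB */

	if (ioctl(fd, BLKZEROOUT, range) < 0)
		perror("ioctl(BLKZEROOUT)");

	close(fd);
	return 0;
}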
drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
 	struct task_struct *opa;
 
 	kref_get(&connection->kref);
+	/* We may just have force_sig()'ed this thread
+	 * to get it out of some blocking network function.
+	 * Clear signals; otherwise kthread_run(), which internally uses
+	 * wait_on_completion_killable(), will mistake our pending signal
+	 * for a new fatal signal and fail. */
+	flush_signals(current);
 	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
 	if (IS_ERR(opa)) {
 		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
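The drbd regression came from a leftover signal: the worker may have been force_sig()'ed out of a blocking network call, and if that signal is still pending when kthread_run() does its killable wait for the new helper thread, the wait fails and the fence-peer helper is never invoked; hence flush_signals(current) first. A loose user-space analogue of "drain stale signals before the next blocking wait", using SIGUSR1 and sigtimedwait() purely for illustration:

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	/* Simulate the leftover signal that kicked us out of a blocking call. */
	raise(SIGUSR1);

	/*
	 * Drain anything still pending before the next blocking wait, the
	 * same idea as flush_signals(current) in the drbd fix: an old,
	 * already-handled signal must not be mistaken for a new fatal one.
	 */
	struct timespec zero = { 0, 0 };
	while (sigtimedwait(&set, NULL, &zero) > 0)
		;

	puts("pending signals drained; safe to start the helper and wait");
	return 0;
}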