Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Been sitting on this for a while, but lets get this out the door.
  This fixes various important bugs for 3.3 final, along with a few
  more trivial ones.  Please pull!"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: fix ioc leak in put_io_context
  block, sx8: fix pointer math issue getting fw version
  Block: use a freezable workqueue for disk-event polling
  drivers/block/DAC960: fix -Wuninitialized warning
  drivers/block/DAC960: fix DAC960_V2_IOCTL_Opcode_T -Wenum-compare warning
  block: fix __blkdev_get and add_disk race condition
  block: Fix setting bio flags in drivers (sd_dif/floppy)
  block: Fix NULL pointer dereference in sd_revalidate_disk
  block: exit_io_context() should call elevator_exit_icq_fn()
  block: simplify ioc_release_fn()
  block: replace icq->changed with icq->flags
This commit is contained in:
commit f1cbd03f5e

 block/blk-ioc.c | 133
block/blk-ioc.c

@@ -36,10 +36,22 @@ static void icq_free_icq_rcu(struct rcu_head *head)
 	kmem_cache_free(icq->__rcu_icq_cache, icq);
 }
 
-/*
- * Exit and free an icq. Called with both ioc and q locked.
- */
+/* Exit an icq. Called with both ioc and q locked. */
 static void ioc_exit_icq(struct io_cq *icq)
 {
+	struct elevator_type *et = icq->q->elevator->type;
+
+	if (icq->flags & ICQ_EXITED)
+		return;
+
+	if (et->ops.elevator_exit_icq_fn)
+		et->ops.elevator_exit_icq_fn(icq);
+
+	icq->flags |= ICQ_EXITED;
+}
+
+/* Release an icq. Called with both ioc and q locked. */
+static void ioc_destroy_icq(struct io_cq *icq)
+{
 	struct io_context *ioc = icq->ioc;
 	struct request_queue *q = icq->q;
@@ -60,8 +72,7 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-	if (et->ops.elevator_exit_icq_fn)
-		et->ops.elevator_exit_icq_fn(icq);
+	ioc_exit_icq(icq);
 
 	/*
 	 * @icq->q might have gone away by the time RCU callback runs
@@ -79,7 +90,6 @@ static void ioc_release_fn(struct work_struct *work)
 {
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
-	struct request_queue *last_q = NULL;
 	unsigned long flags;
 
 	/*
@@ -93,44 +103,19 @@ static void ioc_release_fn(struct work_struct *work)
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
+		struct request_queue *q = icq->q;
 
-		if (this_q != last_q) {
-			/*
-			 * Need to switch to @this_q. Once we release
-			 * @ioc->lock, it can go away along with @cic.
-			 * Hold on to it.
-			 */
-			__blk_get_queue(this_q);
-
-			/*
-			 * blk_put_queue() might sleep thanks to kobject
-			 * idiocy. Always release both locks, put and
-			 * restart.
-			 */
-			if (last_q) {
-				spin_unlock(last_q->queue_lock);
-				spin_unlock_irqrestore(&ioc->lock, flags);
-				blk_put_queue(last_q);
-			} else {
-				spin_unlock_irqrestore(&ioc->lock, flags);
-			}
-
-			last_q = this_q;
-			spin_lock_irqsave(this_q->queue_lock, flags);
-			spin_lock_nested(&ioc->lock, 1);
-			continue;
+		if (spin_trylock(q->queue_lock)) {
+			ioc_destroy_icq(icq);
+			spin_unlock(q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 		}
-		ioc_exit_icq(icq);
 	}
 
-	if (last_q) {
-		spin_unlock(last_q->queue_lock);
-		spin_unlock_irqrestore(&ioc->lock, flags);
-		blk_put_queue(last_q);
-	} else {
-		spin_unlock_irqrestore(&ioc->lock, flags);
-	}
+	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	kmem_cache_free(iocontext_cachep, ioc);
 }
@@ -145,6 +130,7 @@ static void ioc_release_fn(struct work_struct *work)
 void put_io_context(struct io_context *ioc)
 {
 	unsigned long flags;
+	bool free_ioc = false;
 
 	if (ioc == NULL)
 		return;
@@ -159,8 +145,13 @@ void put_io_context(struct io_context *ioc)
 		spin_lock_irqsave(&ioc->lock, flags);
 		if (!hlist_empty(&ioc->icq_list))
 			schedule_work(&ioc->release_work);
+		else
+			free_ioc = true;
 		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
+
+	if (free_ioc)
+		kmem_cache_free(iocontext_cachep, ioc);
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -168,13 +159,41 @@ EXPORT_SYMBOL(put_io_context);
 void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
+	struct io_cq *icq;
+	struct hlist_node *n;
+	unsigned long flags;
 
 	task_lock(task);
 	ioc = task->io_context;
 	task->io_context = NULL;
 	task_unlock(task);
 
-	atomic_dec(&ioc->nr_tasks);
+	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+		put_io_context(ioc);
+		return;
+	}
+
+	/*
+	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
+	 * reverse double locking. Read comment in ioc_release_fn() for
+	 * explanation on the nested locking annotation.
+	 */
+retry:
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+		if (icq->flags & ICQ_EXITED)
+			continue;
+		if (spin_trylock(icq->q->queue_lock)) {
+			ioc_exit_icq(icq);
+			spin_unlock(icq->q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			goto retry;
+		}
+	}
+	spin_unlock_irqrestore(&ioc->lock, flags);
+
 	put_io_context(ioc);
 }
 
@@ -194,7 +213,7 @@ void ioc_clear_queue(struct request_queue *q)
 		struct io_context *ioc = icq->ioc;
 
 		spin_lock(&ioc->lock);
-		ioc_exit_icq(icq);
+		ioc_destroy_icq(icq);
 		spin_unlock(&ioc->lock);
 	}
 }
@@ -363,13 +382,13 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
 	return icq;
 }
 
-void ioc_set_changed(struct io_context *ioc, int which)
+void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
 {
 	struct io_cq *icq;
 	struct hlist_node *n;
 
 	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
-		set_bit(which, &icq->changed);
+		icq->flags |= flags;
 }
 
 /**
@@ -387,7 +406,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
 
 	spin_lock_irqsave(&ioc->lock, flags);
 	ioc->ioprio = ioprio;
-	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
+	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
@@ -404,11 +423,33 @@ void ioc_cgroup_changed(struct io_context *ioc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
+	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 EXPORT_SYMBOL(ioc_cgroup_changed);
 
+/**
+ * icq_get_changed - fetch and clear icq changed mask
+ * @icq: icq of interest
+ *
+ * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
+ * @icq->ioc->lock.
+ */
+unsigned icq_get_changed(struct io_cq *icq)
+{
+	unsigned int changed = 0;
+	unsigned long flags;
+
+	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
+		spin_lock_irqsave(&icq->ioc->lock, flags);
+		changed = icq->flags & ICQ_CHANGED_MASK;
+		icq->flags &= ~ICQ_CHANGED_MASK;
+		spin_unlock_irqrestore(&icq->ioc->lock, flags);
+	}
+	return changed;
+}
+EXPORT_SYMBOL(icq_get_changed);
+
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",

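Two of the hunks above (ioc_release_fn() and exit_io_context()) share the same locking idea: ioc->lock and queue_lock nest in opposite orders depending on the caller, so the icq teardown path takes ioc->lock normally, try-locks queue_lock, and on contention drops everything, relaxes, and retries instead of risking an ABBA deadlock. A minimal userspace sketch of that pattern using POSIX spinlocks; the lock names and the surrounding scenario are invented for illustration, and sched_yield() stands in for the kernel's cpu_relax():

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_spinlock_t ioc_lock;   /* stand-in for ioc->lock */
static pthread_spinlock_t queue_lock; /* stand-in for q->queue_lock */

/* Canonical order elsewhere is queue_lock -> ioc_lock. This path must
 * start from ioc_lock, so it may only *try* to take queue_lock; on
 * contention it drops what it holds and retries instead of deadlocking. */
static void reverse_order_op(void)
{
        for (;;) {
                pthread_spin_lock(&ioc_lock);
                if (pthread_spin_trylock(&queue_lock) == 0) {
                        /* both locks held: do the real work here */
                        pthread_spin_unlock(&queue_lock);
                        pthread_spin_unlock(&ioc_lock);
                        return;
                }
                pthread_spin_unlock(&ioc_lock);
                sched_yield(); /* userspace stand-in for cpu_relax() */
        }
}

int main(void)
{
        pthread_spin_init(&ioc_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&queue_lock, PTHREAD_PROCESS_PRIVATE);
        reverse_order_op();
        puts("done");
        return 0;
}
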
block/cfq-iosched.c

@@ -3470,20 +3470,20 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	const int rw = rq_data_dir(rq);
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
+	unsigned int changed;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 	spin_lock_irq(q->queue_lock);
 
 	/* handle changed notifications */
-	if (unlikely(cic->icq.changed)) {
-		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
-			changed_ioprio(cic);
+	changed = icq_get_changed(&cic->icq);
+	if (unlikely(changed & ICQ_IOPRIO_CHANGED))
+		changed_ioprio(cic);
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
-			changed_cgroup(cic);
+	if (unlikely(changed & ICQ_CGROUP_CHANGED))
+		changed_cgroup(cic);
 #endif
-	}
 
 new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);

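The cfq_set_request() hunk is the consumer side of the new flags API: one icq_get_changed() call fetches and clears every pending ICQ_*_CHANGED bit under ioc->lock, replacing one test_and_clear_bit() per flag. A standalone sketch of the same fetch-and-clear semantics, using C11 atomics in place of the kernel's spinlock (constant and function names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define IOPRIO_CHANGED (1u << 0)
#define CGROUP_CHANGED (1u << 1)
#define CHANGED_MASK   (IOPRIO_CHANGED | CGROUP_CHANGED)

static _Atomic unsigned int icq_flags;

/* Return all pending change bits and clear them in one atomic step,
 * so a notification can be neither lost nor handled twice. */
static unsigned int get_changed(void)
{
        return atomic_fetch_and(&icq_flags, ~CHANGED_MASK) & CHANGED_MASK;
}

int main(void)
{
        atomic_fetch_or(&icq_flags, IOPRIO_CHANGED | CGROUP_CHANGED);

        unsigned int changed = get_changed();
        if (changed & IOPRIO_CHANGED)
                puts("handle ioprio change");
        if (changed & CGROUP_CHANGED)
                puts("handle cgroup change");

        /* A second call sees nothing: the bits were consumed above. */
        printf("second fetch: %#x\n", get_changed());
        return 0;
}
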
block/genhd.c

@@ -35,6 +35,7 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);
 static void disk_release_events(struct gendisk *disk);
@@ -601,6 +602,8 @@ void add_disk(struct gendisk *disk)
 	disk->major = MAJOR(devt);
 	disk->first_minor = MINOR(devt);
 
+	disk_alloc_events(disk);
+
 	/* Register BDI before referencing it from bdev */
 	bdi = &disk->queue->backing_dev_info;
 	bdi_register_dev(bdi, disk_devt(disk));
@@ -1475,9 +1478,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1521,7 +1524,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 	ev->clearing |= mask;
 	if (!ev->block) {
 		cancel_delayed_work(&ev->dwork);
-		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	}
 	spin_unlock_irq(&ev->lock);
 }
@@ -1558,7 +1561,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
 
@@ -1595,7 +1598,7 @@ static void disk_events_workfn(struct work_struct *work)
 
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 
 	spin_unlock_irq(&ev->lock);
 
@@ -1733,9 +1736,9 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
 		&disk_events_dfl_poll_msecs, 0644);
 
 /*
- * disk_{add|del|release}_events - initialize and destroy disk_events.
+ * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
  */
-static void disk_add_events(struct gendisk *disk)
+static void disk_alloc_events(struct gendisk *disk)
 {
 	struct disk_events *ev;
 
@@ -1748,16 +1751,6 @@ static void disk_add_events(struct gendisk *disk)
 		return;
 	}
 
-	if (sysfs_create_files(&disk_to_dev(disk)->kobj,
-			       disk_events_attrs) < 0) {
-		pr_warn("%s: failed to create sysfs files for events\n",
-			disk->disk_name);
-		kfree(ev);
-		return;
-	}
-
-	disk->ev = ev;
-
 	INIT_LIST_HEAD(&ev->node);
 	ev->disk = disk;
 	spin_lock_init(&ev->lock);
@@ -1766,8 +1759,21 @@ static void disk_add_events(struct gendisk *disk)
 	ev->poll_msecs = -1;
 	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
 
+	disk->ev = ev;
+}
+
+static void disk_add_events(struct gendisk *disk)
+{
+	if (!disk->ev)
+		return;
+
+	/* FIXME: error handling */
+	if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
+		pr_warn("%s: failed to create sysfs files for events\n",
+			disk->disk_name);
+
 	mutex_lock(&disk_events_mutex);
-	list_add_tail(&ev->node, &disk_events);
+	list_add_tail(&disk->ev->node, &disk_events);
 	mutex_unlock(&disk_events_mutex);
 
 	/*

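The genhd.c changes do two things: they split disk_add_events() so the disk_events structure is allocated before add_disk() makes the disk openable (closing the race with __blkdev_get()), and they move every event-poll queue_delayed_work() onto system_nrt_freezable_wq so polling is parked while the system suspends. A self-contained sketch of the same freezable, non-reentrant polling setup, written as a hypothetical module (not part of the patch) against the 3.3-era workqueue API:

/* Hypothetical module: re-arming delayed work on a freezable,
 * non-reentrant workqueue, the property the disk-event poller
 * gains above. WQ_NON_REENTRANT existed in the 3.3-era API. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *poll_wq;
static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
        pr_info("poll tick\n");
        /* re-arm; while the system is frozen this simply stays queued */
        queue_delayed_work(poll_wq, &poll_work, HZ);
}

static int __init poll_init(void)
{
        poll_wq = alloc_workqueue("poll_nrt_freezable",
                                  WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
        if (!poll_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&poll_work, poll_fn);
        queue_delayed_work(poll_wq, &poll_work, HZ);
        return 0;
}

static void __exit poll_exit(void)
{
        cancel_delayed_work_sync(&poll_work);
        destroy_workqueue(poll_wq);
}

module_init(poll_init);
module_exit(poll_exit);
MODULE_LICENSE("GPL");
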
fs/partitions/check.c

@@ -389,17 +389,11 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
 	}
 }
 
-int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
 {
-	struct parsed_partitions *state = NULL;
 	struct disk_part_iter piter;
 	struct hd_struct *part;
-	int p, highest, res;
-rescan:
-	if (state && !IS_ERR(state)) {
-		kfree(state);
-		state = NULL;
-	}
+	int res;
 
 	if (bdev->bd_part_count)
 		return -EBUSY;
@@ -412,6 +406,24 @@ rescan:
 		delete_partition(disk, part->partno);
 	disk_part_iter_exit(&piter);
 
+	return 0;
+}
+
+int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+	struct parsed_partitions *state = NULL;
+	struct hd_struct *part;
+	int p, highest, res;
+rescan:
+	if (state && !IS_ERR(state)) {
+		kfree(state);
+		state = NULL;
+	}
+
+	res = drop_partitions(disk, bdev);
+	if (res)
+		return res;
+
 	if (disk->fops->revalidate_disk)
 		disk->fops->revalidate_disk(disk);
 	check_disk_size_change(disk, bdev);
@@ -515,6 +527,26 @@ rescan:
 	return 0;
 }
 
+int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+	int res;
+
+	if (!bdev->bd_invalidated)
+		return 0;
+
+	res = drop_partitions(disk, bdev);
+	if (res)
+		return res;
+
+	set_capacity(disk, 0);
+	check_disk_size_change(disk, bdev);
+	bdev->bd_invalidated = 0;
+	/* tell userspace that the media / partition table may have changed */
+	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+
+	return 0;
+}
+
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;

drivers/block/DAC960.c

@@ -1177,7 +1177,8 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
   int TimeoutCounter;
   int i;
 
+  memset(&CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
 
   if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
 	return DAC960_Failure(Controller, "DMA mask out of range");
   Controller->BounceBufferLimit = DMA_BIT_MASK(32);
@@ -4627,7 +4628,8 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
   DAC960_Controller_T *Controller = Command->Controller;
   DAC960_CommandType_T CommandType = Command->CommandType;
   DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
-  DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode;
+  DAC960_V2_IOCTL_Opcode_T IOCTLOpcode = CommandMailbox->Common.IOCTL_Opcode;
+  DAC960_V2_CommandOpcode_T CommandOpcode = CommandMailbox->SCSI_10.CommandOpcode;
   DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus;
 
   if (CommandType == DAC960_ReadCommand ||
@@ -4699,7 +4701,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
     {
       if (Controller->ShutdownMonitoringTimer)
 	      return;
-      if (CommandOpcode == DAC960_V2_GetControllerInfo)
+      if (IOCTLOpcode == DAC960_V2_GetControllerInfo)
 	{
 	  DAC960_V2_ControllerInfo_T *NewControllerInfo =
 	    Controller->V2.NewControllerInformation;
@@ -4719,14 +4721,14 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
 	  memcpy(ControllerInfo, NewControllerInfo,
 		 sizeof(DAC960_V2_ControllerInfo_T));
 	}
-      else if (CommandOpcode == DAC960_V2_GetEvent)
+      else if (IOCTLOpcode == DAC960_V2_GetEvent)
 	{
 	  if (CommandStatus == DAC960_V2_NormalCompletion) {
 	    DAC960_V2_ReportEvent(Controller, Controller->V2.Event);
 	  }
 	  Controller->V2.NextEventSequenceNumber++;
 	}
-      else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
+      else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
 	       CommandStatus == DAC960_V2_NormalCompletion)
 	{
 	  DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
@@ -4915,7 +4917,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
 	      NewPhysicalDeviceInfo->LogicalUnit++;
 	      Controller->V2.PhysicalDeviceIndex++;
 	    }
-      else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
+      else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
 	{
 	  unsigned int DeviceIndex;
 	  for (DeviceIndex = Controller->V2.PhysicalDeviceIndex;
@@ -4938,7 +4940,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
 	    }
 	  Controller->V2.NeedPhysicalDeviceInformation = false;
 	}
-      else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
+      else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
 	       CommandStatus == DAC960_V2_NormalCompletion)
 	{
 	  DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
@@ -5065,7 +5067,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
 	    [LogicalDeviceNumber] = true;
 	  NewLogicalDeviceInfo->LogicalDeviceNumber++;
 	}
-      else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
+      else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
 	{
 	  int LogicalDriveNumber;
 	  for (LogicalDriveNumber = 0;

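For reference, the DAC960 hunks fix a -Wenum-compare report: one variable of type DAC960_V2_IOCTL_Opcode_T was doing double duty, so comparisons ended up mixing two distinct enumerated types. The patch introduces IOCTLOpcode for the IOCTL opcode and keeps CommandOpcode for the SCSI opcode. A toy reproduction of the warning (enum names invented; build with gcc -Wenum-compare):

#include <stdio.h>

/* Stand-ins for DAC960_V2_IOCTL_Opcode_T and DAC960_V2_CommandOpcode_T. */
enum ioctl_opcode   { GET_CONTROLLER_INFO = 0x01 };
enum command_opcode { SCSI_INQUIRY        = 0x12 };

int main(void)
{
        enum ioctl_opcode op = GET_CONTROLLER_INFO;

        /* GCC warns: comparison between 'enum ioctl_opcode' and
         * 'enum command_opcode'. The values come from unrelated
         * namespaces, so a match here would be meaningless. */
        if (op == SCSI_INQUIRY)
                puts("bogus match");

        /* The fix keeps one variable per enum type instead. */
        return 0;
}
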
drivers/block/sx8.c

@@ -1120,7 +1120,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 		break;
 	case MISC_GET_FW_VER: {
 		struct carm_fw_ver *ver = (struct carm_fw_ver *)
-			mem + sizeof(struct carm_msg_get_fw_ver);
+			(mem + sizeof(struct carm_msg_get_fw_ver));
 		if (!error) {
 			host->fw_ver = le32_to_cpu(ver->version);
 			host->flags |= (ver->features & FL_FW_VER_MASK);

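The sx8.c change is a one-character-class bug worth spelling out: a cast binds tighter than '+', so '(struct carm_fw_ver *)mem + n' advances the pointer by n elements of carm_fw_ver, not n bytes. A small standalone demonstration with stand-in struct names (offsets assume 4-byte int):

#include <stdio.h>

struct msg_hdr { int a, b; };    /* stand-in for carm_msg_get_fw_ver */
struct fw_ver  { int version; }; /* stand-in for carm_fw_ver */

int main(void)
{
        char mem[256];

        /* Buggy: the cast happens first, so the '+' scales by
         * sizeof(struct fw_ver), landing 8 * 4 = 32 bytes in. */
        struct fw_ver *bad = (struct fw_ver *)mem + sizeof(struct msg_hdr);

        /* Fixed: add byte offsets first, then cast the result. */
        struct fw_ver *good = (struct fw_ver *)(mem + sizeof(struct msg_hdr));

        printf("bad offset:  %td bytes\n", (char *)bad - mem);  /* 32 */
        printf("good offset: %td bytes\n", (char *)good - mem); /*  8 */
        return 0;
}
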
fs/block_dev.c

@@ -1183,8 +1183,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			 * The latter is necessary to prevent ghost
 			 * partitions on a removed medium.
 			 */
-			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
-				rescan_partitions(disk, bdev);
+			if (bdev->bd_invalidated) {
+				if (!ret)
+					rescan_partitions(disk, bdev);
+				else if (ret == -ENOMEDIUM)
+					invalidate_partitions(disk, bdev);
+			}
 			if (ret)
 				goto out_clear;
 		} else {
@@ -1214,8 +1218,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			if (bdev->bd_disk->fops->open)
 				ret = bdev->bd_disk->fops->open(bdev, mode);
 			/* the same as first opener case, read comment there */
-			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
-				rescan_partitions(bdev->bd_disk, bdev);
+			if (bdev->bd_invalidated) {
+				if (!ret)
+					rescan_partitions(bdev->bd_disk, bdev);
+				else if (ret == -ENOMEDIUM)
+					invalidate_partitions(bdev->bd_disk, bdev);
+			}
 			if (ret)
 				goto out_unlock_bdev;
 		}

include/linux/genhd.h

@@ -596,6 +596,7 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
 
 extern int disk_expand_part_tbl(struct gendisk *disk, int target);
 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
+extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
 extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
 						     int partno, sector_t start,
 						     sector_t len, int flags,

include/linux/iocontext.h

@@ -6,8 +6,11 @@
 #include <linux/workqueue.h>
 
 enum {
-	ICQ_IOPRIO_CHANGED,
-	ICQ_CGROUP_CHANGED,
+	ICQ_IOPRIO_CHANGED	= 1 << 0,
+	ICQ_CGROUP_CHANGED	= 1 << 1,
+	ICQ_EXITED		= 1 << 2,
+
+	ICQ_CHANGED_MASK	= ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
 };
 
 /*
@@ -88,7 +91,7 @@ struct io_cq {
 		struct rcu_head		__rcu_head;
 	};
 
-	unsigned long		changed;
+	unsigned int		flags;
 };
 
 /*
@@ -139,6 +142,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
 void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
 void ioc_cgroup_changed(struct io_context *ioc);
+unsigned int icq_get_changed(struct io_cq *icq);
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }

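Note the convention change in the enum above: the old values were bit numbers for set_bit()/test_and_clear_bit(), while the new ones are ready-made masks for plain |= and & under ioc->lock. The two conventions do not mix, which is why the patch converts every user at once; a quick standalone illustration (names invented):

#include <stdio.h>

int main(void)
{
        unsigned int flags = 0;

        /* Bit-number convention, as the old ICQ_* values were used
         * with set_bit(): the constant names a position. */
        enum { IOPRIO_BIT = 0, CGROUP_BIT = 1 };
        flags |= 1u << CGROUP_BIT;

        /* Mask convention, as the new ICQ_* values are used with
         * |= and &: the constant is already a power of two. */
        enum { IOPRIO_CHANGED = 1 << 0, CGROUP_CHANGED = 1 << 1 };
        printf("correct mask test: %d\n", !!(flags & CGROUP_CHANGED)); /* 1 */

        /* Mixing them silently tests the wrong bit: this checks
         * mask 0x1 (bit 0), not bit 1, and prints 0. */
        printf("buggy mixed test:  %d\n", !!(flags & CGROUP_BIT));
        return 0;
}
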
include/linux/workqueue.h

@@ -289,12 +289,16 @@ enum {
  *
  * system_freezable_wq is equivalent to system_wq except that it's
  * freezable.
+ *
+ * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
+ * it's freezable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
+extern struct workqueue_struct *system_nrt_freezable_wq;
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,

kernel/workqueue.c

@@ -253,11 +253,13 @@ struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
 struct workqueue_struct *system_unbound_wq __read_mostly;
 struct workqueue_struct *system_freezable_wq __read_mostly;
+struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
 EXPORT_SYMBOL_GPL(system_freezable_wq);
+EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -3833,8 +3835,11 @@ static int __init init_workqueues(void)
 					    WQ_UNBOUND_MAX_ACTIVE);
 	system_freezable_wq = alloc_workqueue("events_freezable",
 					      WQ_FREEZABLE, 0);
+	system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
+						  WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-	       !system_unbound_wq || !system_freezable_wq);
+	       !system_unbound_wq || !system_freezable_wq ||
+	       !system_nrt_freezable_wq);
 	return 0;
 }
 early_initcall(init_workqueues);