Merge tag 'dm-3.15-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper changes from Mike Snitzer:

 - Fix dm-cache corruption caused by discard_block_size > cache_block_size

 - Fix a lock-inversion detected by LOCKDEP in dm-cache

 - Fix a dangling bio bug in the dm-thinp target's process_deferred_bios
   error path

 - Fix corruption due to non-atomic transaction commit which allowed a
   metadata superblock to be written before all other metadata was
   successfully written -- this is common to all targets that use the
   persistent-data library's transaction manager (dm-thinp, dm-cache and
   dm-era).

 - Various small cleanups in the DM core

 - Add the dm-era target which is useful for keeping track of which
   blocks were written within a user defined period of time called an
   'era'. Use cases include tracking changed blocks for backup software,
   and partially invalidating the contents of a cache to restore cache
   coherency after rolling back a vendor snapshot.

 - Improve the on-disk layout of multithreaded writes to the
   dm-thin-pool by splitting the pool's deferred bio list to be a
   per-thin device list and then sorting that list using an rb_tree.
   The subsequent read throughput of the data written via multiple
   threads improved by ~70%.

 - Simplify the multipath target's handling of queuing IO by pushing
   requests back to the request queue rather than queueing the IO
   internally.

* tag 'dm-3.15-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (24 commits)
  dm cache: fix a lock-inversion
  dm thin: sort the per thin deferred bios using an rb_tree
  dm thin: use per thin device deferred bio lists
  dm thin: simplify pool_is_congested
  dm thin: fix dangling bio in process_deferred_bios error path
  dm mpath: print more useful warnings in multipath_message()
  dm-mpath: do not activate failed paths
  dm mpath: remove extra nesting in map function
  dm mpath: remove map_io()
  dm mpath: reduce memory pressure when requeuing
  dm mpath: remove process_queued_ios()
  dm mpath: push back requests instead of queueing
  dm table: add dm_table_run_md_queue_async
  dm mpath: do not call pg_init when it is already running
  dm: use RCU_INIT_POINTER instead of rcu_assign_pointer in __unbind
  dm: stop using bi_private
  dm: remove dm_get_mapinfo
  dm: make dm_table_alloc_md_mempools static
  dm: take care to copy the space map roots before locking the superblock
  dm transaction manager: fix corruption due to non-atomic transaction commit
  ...
commit 04535d273e

Documentation/device-mapper/era.txt | 108 (new file)

@@ -0,0 +1,108 @@
Introduction
============

dm-era is a target that behaves similarly to the linear target. In
addition it keeps track of which blocks were written within a
user-defined period of time called an 'era'. Each era target instance
maintains the current era as a monotonically increasing 32-bit
counter.

Use cases include tracking changed blocks for backup software, and
partially invalidating the contents of a cache to restore cache
coherency after rolling back a vendor snapshot.

Constructor
===========

   era <metadata dev> <origin dev> <block size>

   metadata dev  : fast device holding the persistent metadata
   origin dev    : device holding data blocks that may change
   block size    : block size of origin data device, granularity that is
                   tracked by the target

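As a minimal sketch (the device names and sizes here are hypothetical),
a 200GiB origin tracked at a 64KiB (128-sector) era block granularity
could be set up with:

   dmsetup create era_vol --table "0 419430400 era /dev/vg/era_meta /dev/vg/origin 128"
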
Messages
========

None of the dm messages take any arguments.

checkpoint
----------

Possibly move to a new era. You shouldn't assume the era has
incremented. After sending this message, you should check the
current era via the status line.

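For example, using the hypothetical 'era_vol' device from above, a
checkpoint can be requested and the result read back with:

   dmsetup message era_vol 0 checkpoint
   dmsetup status era_vol

The <current era> field of the status output (see Status below) shows
the era that writes are now being recorded against.
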
take_metadata_snap
------------------

Create a clone of the metadata, to allow a userland process to read it.

drop_metadata_snap
------------------

Drop the metadata snapshot.

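For instance (again with the hypothetical 'era_vol' device):

   dmsetup message era_vol 0 take_metadata_snap
   dmsetup status era_vol       # last field changes from '-' to the held root
   dmsetup message era_vol 0 drop_metadata_snap
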
Status
======

<metadata block size> <#used metadata blocks>/<#total metadata blocks>
<current era> <held metadata root | '-'>

metadata block size    : Fixed block size for each metadata block in
                         sectors
#used metadata blocks  : Number of metadata blocks used
#total metadata blocks : Total number of metadata blocks
current era            : The current era
held metadata root     : The location, in blocks, of the metadata root
                         that has been 'held' for userspace read
                         access. '-' indicates there is no held root

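As an illustration (the numbers are invented), a status line such as

   8 84/4096 32 -

would read as: 8-sector (4KiB) metadata blocks, 84 of 4096 metadata
blocks in use, current era 32, and no metadata root currently held for
userspace access.
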
Detailed use case
=================

The scenario of invalidating a cache when rolling back a vendor
snapshot was the primary use case when developing this target:

Taking a vendor snapshot
------------------------

- Send a checkpoint message to the era target
- Make a note of the current era in its status line
- Take vendor snapshot (the era and snapshot should be forever
  associated now).

Rolling back to a vendor snapshot
---------------------------------

- Cache enters passthrough mode (see: dm-cache's docs in cache.txt)
- Rollback vendor storage
- Take metadata snapshot
- Ascertain which blocks have been written since the snapshot was taken
  by checking each block's era
- Invalidate those blocks in the caching software
- Cache returns to writeback/writethrough mode

A possible command sequence for these steps is sketched below.

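A rough sketch of that sequence, reusing the hypothetical 'era_vol'
device (the era_invalidate invocation is an assumption -- the exact
tool name and options depend on your thin-provisioning-tools version):

   # while taking the vendor snapshot: note the era to associate with it
   dmsetup message era_vol 0 checkpoint
   dmsetup status era_vol                  # record <current era>

   # after rolling back the vendor storage
   dmsetup message era_vol 0 take_metadata_snap
   era_invalidate --written-since <recorded era> <metadata dev>
   dmsetup message era_vol 0 drop_metadata_snap

The blocks reported by the invalidation step are then invalidated in
the caching software before the cache leaves passthrough mode.
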
Memory usage
============

The target uses a bitset to record writes in the current era. It also
has a spare bitset ready for switching over to a new era. Other than
that it uses a few 4k blocks for updating metadata.

   (4 * nr_blocks) bytes + buffers

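As a worked example (assuming a hypothetical 1TiB origin divided into
64KiB era blocks): nr_blocks = 1TiB / 64KiB = 16777216, so the bitsets
account for roughly 4 * 16777216 bytes = 64MiB, plus the metadata
buffers.
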
Resilience
==========

Metadata is updated on disk before a write to a previously unwritten
block is performed. As such dm-era should not be affected by a hard
crash such as power failure.

Userland tools
==============

Userland tools are found in the increasingly poorly named
thin-provisioning-tools project:

    https://github.com/jthornber/thin-provisioning-tools
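(The era-specific tools shipped there are expected to include
era_check, era_dump, era_invalidate and era_restore; consult that
project's documentation for the exact set and options available in
your version.)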
drivers/md/Kconfig
@@ -285,6 +285,17 @@ config DM_CACHE_CLEANER
 	  A simple cache policy that writes back all data to the
 	  origin. Used when decommissioning a dm-cache.

+config DM_ERA
+	tristate "Era target (EXPERIMENTAL)"
+	depends on BLK_DEV_DM
+	default n
+	select DM_PERSISTENT_DATA
+	select DM_BIO_PRISON
+	---help---
+	 dm-era tracks which parts of a block device are written to
+	 over time. Useful for maintaining cache coherency when using
+	 vendor snapshots.
+
 config DM_MIRROR
 	tristate "Mirror target"
 	depends on BLK_DEV_DM
drivers/md/Makefile
@@ -14,6 +14,7 @@ dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
 dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
 dm-cache-mq-y += dm-cache-policy-mq.o
 dm-cache-cleaner-y += dm-cache-policy-cleaner.o
+dm-era-y += dm-era-target.o
 md-mod-y += md.o bitmap.o
 raid456-y += raid5.o

@@ -53,6 +54,7 @@ obj-$(CONFIG_DM_VERITY) += dm-verity.o
 obj-$(CONFIG_DM_CACHE) += dm-cache.o
 obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
 obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
+obj-$(CONFIG_DM_ERA) += dm-era.o

 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs += dm-uevent.o
drivers/md/dm-cache-block-types.h
@@ -19,7 +19,6 @@

 typedef dm_block_t __bitwise__ dm_oblock_t;
 typedef uint32_t __bitwise__ dm_cblock_t;
-typedef dm_block_t __bitwise__ dm_dblock_t;

 static inline dm_oblock_t to_oblock(dm_block_t b)
 {
@@ -41,14 +40,4 @@ static inline uint32_t from_cblock(dm_cblock_t b)
 	return (__force uint32_t) b;
 }

-static inline dm_dblock_t to_dblock(dm_block_t b)
-{
-	return (__force dm_dblock_t) b;
-}
-
-static inline dm_block_t from_dblock(dm_dblock_t b)
-{
-	return (__force dm_block_t) b;
-}
-
 #endif /* DM_CACHE_BLOCK_TYPES_H */
@ -109,7 +109,7 @@ struct dm_cache_metadata {
|
||||
dm_block_t discard_root;
|
||||
|
||||
sector_t discard_block_size;
|
||||
dm_dblock_t discard_nr_blocks;
|
||||
dm_oblock_t discard_nr_blocks;
|
||||
|
||||
sector_t data_block_size;
|
||||
dm_cblock_t cache_blocks;
|
||||
@ -120,6 +120,12 @@ struct dm_cache_metadata {
|
||||
unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
|
||||
size_t policy_hint_size;
|
||||
struct dm_cache_statistics stats;
|
||||
|
||||
/*
|
||||
* Reading the space map root can fail, so we read it into this
|
||||
* buffer before the superblock is locked and updated.
|
||||
*/
|
||||
__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
|
||||
};
|
||||
|
||||
/*-------------------------------------------------------------------
|
||||
@ -260,11 +266,31 @@ static void __setup_mapping_info(struct dm_cache_metadata *cmd)
|
||||
}
|
||||
}
|
||||
|
||||
static int __save_sm_root(struct dm_cache_metadata *cmd)
|
||||
{
|
||||
int r;
|
||||
size_t metadata_len;
|
||||
|
||||
r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
|
||||
metadata_len);
|
||||
}
|
||||
|
||||
static void __copy_sm_root(struct dm_cache_metadata *cmd,
|
||||
struct cache_disk_superblock *disk_super)
|
||||
{
|
||||
memcpy(&disk_super->metadata_space_map_root,
|
||||
&cmd->metadata_space_map_root,
|
||||
sizeof(cmd->metadata_space_map_root));
|
||||
}
|
||||
|
||||
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
|
||||
{
|
||||
int r;
|
||||
struct dm_block *sblock;
|
||||
size_t metadata_len;
|
||||
struct cache_disk_superblock *disk_super;
|
||||
sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
|
||||
|
||||
@ -272,12 +298,16 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
|
||||
if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
|
||||
bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
|
||||
|
||||
r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
|
||||
r = dm_tm_pre_commit(cmd->tm);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = dm_tm_pre_commit(cmd->tm);
|
||||
if (r < 0)
|
||||
/*
|
||||
* dm_sm_copy_root() can fail. So we need to do it before we start
|
||||
* updating the superblock.
|
||||
*/
|
||||
r = __save_sm_root(cmd);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = superblock_lock_zero(cmd, &sblock);
|
||||
@ -293,16 +323,13 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
|
||||
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
|
||||
disk_super->policy_hint_size = 0;
|
||||
|
||||
r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
|
||||
metadata_len);
|
||||
if (r < 0)
|
||||
goto bad_locked;
|
||||
__copy_sm_root(cmd, disk_super);
|
||||
|
||||
disk_super->mapping_root = cpu_to_le64(cmd->root);
|
||||
disk_super->hint_root = cpu_to_le64(cmd->hint_root);
|
||||
disk_super->discard_root = cpu_to_le64(cmd->discard_root);
|
||||
disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
|
||||
disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
|
||||
disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks));
|
||||
disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
|
||||
disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
|
||||
disk_super->cache_blocks = cpu_to_le32(0);
|
||||
@ -313,10 +340,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
|
||||
disk_super->write_misses = cpu_to_le32(0);
|
||||
|
||||
return dm_tm_commit(cmd->tm, sblock);
|
||||
|
||||
bad_locked:
|
||||
dm_bm_unlock(sblock);
|
||||
return r;
|
||||
}
|
||||
|
||||
static int __format_metadata(struct dm_cache_metadata *cmd)
|
||||
@ -496,7 +519,7 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
|
||||
cmd->hint_root = le64_to_cpu(disk_super->hint_root);
|
||||
cmd->discard_root = le64_to_cpu(disk_super->discard_root);
|
||||
cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
|
||||
cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
|
||||
cmd->discard_nr_blocks = to_oblock(le64_to_cpu(disk_super->discard_nr_blocks));
|
||||
cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
|
||||
cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
|
||||
strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
|
||||
@ -530,8 +553,9 @@ static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
|
||||
disk_super = dm_block_data(sblock);
|
||||
update_flags(disk_super, mutator);
|
||||
read_superblock_fields(cmd, disk_super);
|
||||
dm_bm_unlock(sblock);
|
||||
|
||||
return dm_bm_flush_and_unlock(cmd->bm, sblock);
|
||||
return dm_bm_flush(cmd->bm);
|
||||
}
|
||||
|
||||
static int __begin_transaction(struct dm_cache_metadata *cmd)
|
||||
@ -559,7 +583,6 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
|
||||
flags_mutator mutator)
|
||||
{
|
||||
int r;
|
||||
size_t metadata_len;
|
||||
struct cache_disk_superblock *disk_super;
|
||||
struct dm_block *sblock;
|
||||
|
||||
@ -577,8 +600,8 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
|
||||
if (r < 0)
|
||||
r = __save_sm_root(cmd);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = superblock_lock(cmd, &sblock);
|
||||
@ -594,7 +617,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
|
||||
disk_super->hint_root = cpu_to_le64(cmd->hint_root);
|
||||
disk_super->discard_root = cpu_to_le64(cmd->discard_root);
|
||||
disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
|
||||
disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
|
||||
disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks));
|
||||
disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
|
||||
strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
|
||||
disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
|
||||
@ -605,13 +628,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
|
||||
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
|
||||
disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
|
||||
disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
|
||||
|
||||
r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
|
||||
metadata_len);
|
||||
if (r < 0) {
|
||||
dm_bm_unlock(sblock);
|
||||
return r;
|
||||
}
|
||||
__copy_sm_root(cmd, disk_super);
|
||||
|
||||
return dm_tm_commit(cmd->tm, sblock);
|
||||
}
|
||||
@ -771,15 +788,15 @@ out:
|
||||
|
||||
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
|
||||
sector_t discard_block_size,
|
||||
dm_dblock_t new_nr_entries)
|
||||
dm_oblock_t new_nr_entries)
|
||||
{
|
||||
int r;
|
||||
|
||||
down_write(&cmd->root_lock);
|
||||
r = dm_bitset_resize(&cmd->discard_info,
|
||||
cmd->discard_root,
|
||||
from_dblock(cmd->discard_nr_blocks),
|
||||
from_dblock(new_nr_entries),
|
||||
from_oblock(cmd->discard_nr_blocks),
|
||||
from_oblock(new_nr_entries),
|
||||
false, &cmd->discard_root);
|
||||
if (!r) {
|
||||
cmd->discard_block_size = discard_block_size;
|
||||
@ -792,28 +809,28 @@ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
|
||||
return r;
|
||||
}
|
||||
|
||||
static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
|
||||
static int __set_discard(struct dm_cache_metadata *cmd, dm_oblock_t b)
|
||||
{
|
||||
return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
|
||||
from_dblock(b), &cmd->discard_root);
|
||||
from_oblock(b), &cmd->discard_root);
|
||||
}
|
||||
|
||||
static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
|
||||
static int __clear_discard(struct dm_cache_metadata *cmd, dm_oblock_t b)
|
||||
{
|
||||
return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
|
||||
from_dblock(b), &cmd->discard_root);
|
||||
from_oblock(b), &cmd->discard_root);
|
||||
}
|
||||
|
||||
static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
|
||||
static int __is_discarded(struct dm_cache_metadata *cmd, dm_oblock_t b,
|
||||
bool *is_discarded)
|
||||
{
|
||||
return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
|
||||
from_dblock(b), &cmd->discard_root,
|
||||
from_oblock(b), &cmd->discard_root,
|
||||
is_discarded);
|
||||
}
|
||||
|
||||
static int __discard(struct dm_cache_metadata *cmd,
|
||||
dm_dblock_t dblock, bool discard)
|
||||
dm_oblock_t dblock, bool discard)
|
||||
{
|
||||
int r;
|
||||
|
||||
@ -826,7 +843,7 @@ static int __discard(struct dm_cache_metadata *cmd,
|
||||
}
|
||||
|
||||
int dm_cache_set_discard(struct dm_cache_metadata *cmd,
|
||||
dm_dblock_t dblock, bool discard)
|
||||
dm_oblock_t dblock, bool discard)
|
||||
{
|
||||
int r;
|
||||
|
||||
@ -844,8 +861,8 @@ static int __load_discards(struct dm_cache_metadata *cmd,
|
||||
dm_block_t b;
|
||||
bool discard;
|
||||
|
||||
for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
|
||||
dm_dblock_t dblock = to_dblock(b);
|
||||
for (b = 0; b < from_oblock(cmd->discard_nr_blocks); b++) {
|
||||
dm_oblock_t dblock = to_oblock(b);
|
||||
|
||||
if (cmd->clean_when_opened) {
|
||||
r = __is_discarded(cmd, dblock, &discard);
|
||||
@ -1228,22 +1245,12 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
|
||||
return 0;
|
||||
}
|
||||
|
||||
int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
|
||||
static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint)
|
||||
{
|
||||
int r;
|
||||
|
||||
down_write(&cmd->root_lock);
|
||||
r = begin_hints(cmd, policy);
|
||||
up_write(&cmd->root_lock);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
|
||||
uint32_t hint)
|
||||
{
|
||||
int r;
|
||||
struct dm_cache_metadata *cmd = context;
|
||||
__le32 value = cpu_to_le32(hint);
|
||||
int r;
|
||||
|
||||
__dm_bless_for_disk(&value);
|
||||
|
||||
r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
|
||||
@ -1253,16 +1260,25 @@ static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
|
||||
return r;
|
||||
}
|
||||
|
||||
int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
|
||||
uint32_t hint)
|
||||
static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (!hints_array_initialized(cmd))
|
||||
return 0;
|
||||
r = begin_hints(cmd, policy);
|
||||
if (r) {
|
||||
DMERR("begin_hints failed");
|
||||
return r;
|
||||
}
|
||||
|
||||
return policy_walk_mappings(policy, save_hint, cmd);
|
||||
}
|
||||
|
||||
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
|
||||
{
|
||||
int r;
|
||||
|
||||
down_write(&cmd->root_lock);
|
||||
r = save_hint(cmd, cblock, hint);
|
||||
r = write_hints(cmd, policy);
|
||||
up_write(&cmd->root_lock);
|
||||
|
||||
return r;
|
||||
|
@ -72,14 +72,14 @@ dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
|
||||
|
||||
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
|
||||
sector_t discard_block_size,
|
||||
dm_dblock_t new_nr_entries);
|
||||
dm_oblock_t new_nr_entries);
|
||||
|
||||
typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
|
||||
dm_dblock_t dblock, bool discarded);
|
||||
dm_oblock_t dblock, bool discarded);
|
||||
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
|
||||
load_discard_fn fn, void *context);
|
||||
|
||||
int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
|
||||
int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard);
|
||||
|
||||
int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
|
||||
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
|
||||
@ -128,14 +128,7 @@ void dm_cache_dump(struct dm_cache_metadata *cmd);
|
||||
* rather than querying the policy for each cblock, we let it walk its data
|
||||
* structures and fill in the hints in whatever order it wishes.
|
||||
*/
|
||||
|
||||
int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
|
||||
|
||||
/*
|
||||
* requests hints for every cblock and stores in the metadata device.
|
||||
*/
|
||||
int dm_cache_save_hint(struct dm_cache_metadata *cmd,
|
||||
dm_cblock_t cblock, uint32_t hint);
|
||||
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
|
||||
|
||||
/*
|
||||
* Query method. Are all the blocks in the cache clean?
|
||||
|
@ -237,9 +237,8 @@ struct cache {
|
||||
/*
|
||||
* origin_blocks entries, discarded if set.
|
||||
*/
|
||||
dm_dblock_t discard_nr_blocks;
|
||||
dm_oblock_t discard_nr_blocks;
|
||||
unsigned long *discard_bitset;
|
||||
uint32_t discard_block_size; /* a power of 2 times sectors per block */
|
||||
|
||||
/*
|
||||
* Rather than reconstructing the table line for the status we just
|
||||
@ -526,48 +525,33 @@ static dm_block_t block_div(dm_block_t b, uint32_t n)
|
||||
return b;
|
||||
}
|
||||
|
||||
static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
|
||||
{
|
||||
uint32_t discard_blocks = cache->discard_block_size;
|
||||
dm_block_t b = from_oblock(oblock);
|
||||
|
||||
if (!block_size_is_power_of_two(cache))
|
||||
discard_blocks = discard_blocks / cache->sectors_per_block;
|
||||
else
|
||||
discard_blocks >>= cache->sectors_per_block_shift;
|
||||
|
||||
b = block_div(b, discard_blocks);
|
||||
|
||||
return to_dblock(b);
|
||||
}
|
||||
|
||||
static void set_discard(struct cache *cache, dm_dblock_t b)
|
||||
static void set_discard(struct cache *cache, dm_oblock_t b)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
atomic_inc(&cache->stats.discard_count);
|
||||
|
||||
spin_lock_irqsave(&cache->lock, flags);
|
||||
set_bit(from_dblock(b), cache->discard_bitset);
|
||||
set_bit(from_oblock(b), cache->discard_bitset);
|
||||
spin_unlock_irqrestore(&cache->lock, flags);
|
||||
}
|
||||
|
||||
static void clear_discard(struct cache *cache, dm_dblock_t b)
|
||||
static void clear_discard(struct cache *cache, dm_oblock_t b)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cache->lock, flags);
|
||||
clear_bit(from_dblock(b), cache->discard_bitset);
|
||||
clear_bit(from_oblock(b), cache->discard_bitset);
|
||||
spin_unlock_irqrestore(&cache->lock, flags);
|
||||
}
|
||||
|
||||
static bool is_discarded(struct cache *cache, dm_dblock_t b)
|
||||
static bool is_discarded(struct cache *cache, dm_oblock_t b)
|
||||
{
|
||||
int r;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cache->lock, flags);
|
||||
r = test_bit(from_dblock(b), cache->discard_bitset);
|
||||
r = test_bit(from_oblock(b), cache->discard_bitset);
|
||||
spin_unlock_irqrestore(&cache->lock, flags);
|
||||
|
||||
return r;
|
||||
@ -579,8 +563,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cache->lock, flags);
|
||||
r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
|
||||
cache->discard_bitset);
|
||||
r = test_bit(from_oblock(b), cache->discard_bitset);
|
||||
spin_unlock_irqrestore(&cache->lock, flags);
|
||||
|
||||
return r;
|
||||
@ -705,7 +688,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
|
||||
check_if_tick_bio_needed(cache, bio);
|
||||
remap_to_origin(cache, bio);
|
||||
if (bio_data_dir(bio) == WRITE)
|
||||
clear_discard(cache, oblock_to_dblock(cache, oblock));
|
||||
clear_discard(cache, oblock);
|
||||
}
|
||||
|
||||
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
|
||||
@ -715,7 +698,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
|
||||
remap_to_cache(cache, bio, cblock);
|
||||
if (bio_data_dir(bio) == WRITE) {
|
||||
set_dirty(cache, oblock, cblock);
|
||||
clear_discard(cache, oblock_to_dblock(cache, oblock));
|
||||
clear_discard(cache, oblock);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1288,14 +1271,14 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
|
||||
static void process_discard_bio(struct cache *cache, struct bio *bio)
|
||||
{
|
||||
dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
|
||||
cache->discard_block_size);
|
||||
cache->sectors_per_block);
|
||||
dm_block_t end_block = bio_end_sector(bio);
|
||||
dm_block_t b;
|
||||
|
||||
end_block = block_div(end_block, cache->discard_block_size);
|
||||
end_block = block_div(end_block, cache->sectors_per_block);
|
||||
|
||||
for (b = start_block; b < end_block; b++)
|
||||
set_discard(cache, to_dblock(b));
|
||||
set_discard(cache, to_oblock(b));
|
||||
|
||||
bio_endio(bio, 0);
|
||||
}
|
||||
@ -2171,35 +2154,6 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* We want the discard block size to be a power of two, at least the size
|
||||
* of the cache block size, and have no more than 2^14 discard blocks
|
||||
* across the origin.
|
||||
*/
|
||||
#define MAX_DISCARD_BLOCKS (1 << 14)
|
||||
|
||||
static bool too_many_discard_blocks(sector_t discard_block_size,
|
||||
sector_t origin_size)
|
||||
{
|
||||
(void) sector_div(origin_size, discard_block_size);
|
||||
|
||||
return origin_size > MAX_DISCARD_BLOCKS;
|
||||
}
|
||||
|
||||
static sector_t calculate_discard_block_size(sector_t cache_block_size,
|
||||
sector_t origin_size)
|
||||
{
|
||||
sector_t discard_block_size;
|
||||
|
||||
discard_block_size = roundup_pow_of_two(cache_block_size);
|
||||
|
||||
if (origin_size)
|
||||
while (too_many_discard_blocks(discard_block_size, origin_size))
|
||||
discard_block_size *= 2;
|
||||
|
||||
return discard_block_size;
|
||||
}
|
||||
|
||||
#define DEFAULT_MIGRATION_THRESHOLD 2048
|
||||
|
||||
static int cache_create(struct cache_args *ca, struct cache **result)
|
||||
@ -2321,16 +2275,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
|
||||
}
|
||||
clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
|
||||
|
||||
cache->discard_block_size =
|
||||
calculate_discard_block_size(cache->sectors_per_block,
|
||||
cache->origin_sectors);
|
||||
cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
|
||||
cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
|
||||
cache->discard_nr_blocks = cache->origin_blocks;
|
||||
cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks));
|
||||
if (!cache->discard_bitset) {
|
||||
*error = "could not allocate discard bitset";
|
||||
goto bad;
|
||||
}
|
||||
clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
|
||||
clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks));
|
||||
|
||||
cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
|
||||
if (IS_ERR(cache->copier)) {
|
||||
@ -2614,16 +2565,16 @@ static int write_discard_bitset(struct cache *cache)
|
||||
{
|
||||
unsigned i, r;
|
||||
|
||||
r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
|
||||
cache->discard_nr_blocks);
|
||||
r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block,
|
||||
cache->origin_blocks);
|
||||
if (r) {
|
||||
DMERR("could not resize on-disk discard bitset");
|
||||
return r;
|
||||
}
|
||||
|
||||
for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
|
||||
r = dm_cache_set_discard(cache->cmd, to_dblock(i),
|
||||
is_discarded(cache, to_dblock(i)));
|
||||
for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) {
|
||||
r = dm_cache_set_discard(cache->cmd, to_oblock(i),
|
||||
is_discarded(cache, to_oblock(i)));
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
@ -2631,30 +2582,6 @@ static int write_discard_bitset(struct cache *cache)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
|
||||
uint32_t hint)
|
||||
{
|
||||
struct cache *cache = context;
|
||||
return dm_cache_save_hint(cache->cmd, cblock, hint);
|
||||
}
|
||||
|
||||
static int write_hints(struct cache *cache)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = dm_cache_begin_hints(cache->cmd, cache->policy);
|
||||
if (r) {
|
||||
DMERR("dm_cache_begin_hints failed");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = policy_walk_mappings(cache->policy, save_hint, cache);
|
||||
if (r)
|
||||
DMERR("policy_walk_mappings failed");
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* returns true on success
|
||||
*/
|
||||
@ -2672,7 +2599,7 @@ static bool sync_metadata(struct cache *cache)
|
||||
|
||||
save_stats(cache);
|
||||
|
||||
r3 = write_hints(cache);
|
||||
r3 = dm_cache_write_hints(cache->cmd, cache->policy);
|
||||
if (r3)
|
||||
DMERR("could not write hints");
|
||||
|
||||
@ -2720,16 +2647,14 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
|
||||
}
|
||||
|
||||
static int load_discard(void *context, sector_t discard_block_size,
|
||||
dm_dblock_t dblock, bool discard)
|
||||
dm_oblock_t oblock, bool discard)
|
||||
{
|
||||
struct cache *cache = context;
|
||||
|
||||
/* FIXME: handle mis-matched block size */
|
||||
|
||||
if (discard)
|
||||
set_discard(cache, dblock);
|
||||
set_discard(cache, oblock);
|
||||
else
|
||||
clear_discard(cache, dblock);
|
||||
clear_discard(cache, oblock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -3120,8 +3045,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
|
||||
/*
|
||||
* FIXME: these limits may be incompatible with the cache device
|
||||
*/
|
||||
limits->max_discard_sectors = cache->discard_block_size * 1024;
|
||||
limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
|
||||
limits->max_discard_sectors = cache->sectors_per_block;
|
||||
limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT;
|
||||
}
|
||||
|
||||
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
|
||||
@ -3145,7 +3070,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
|
||||
|
||||
static struct target_type cache_target = {
|
||||
.name = "cache",
|
||||
.version = {1, 3, 0},
|
||||
.version = {1, 4, 0},
|
||||
.module = THIS_MODULE,
|
||||
.ctr = cache_ctr,
|
||||
.dtr = cache_dtr,
|
||||
|
drivers/md/dm-era-target.c (new file, 1746 lines)
File diff suppressed because it is too large.
@ -93,10 +93,6 @@ struct multipath {
|
||||
unsigned pg_init_count; /* Number of times pg_init called */
|
||||
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
|
||||
|
||||
unsigned queue_size;
|
||||
struct work_struct process_queued_ios;
|
||||
struct list_head queued_ios;
|
||||
|
||||
struct work_struct trigger_event;
|
||||
|
||||
/*
|
||||
@ -121,9 +117,9 @@ typedef int (*action_fn) (struct pgpath *pgpath);
|
||||
static struct kmem_cache *_mpio_cache;
|
||||
|
||||
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
|
||||
static void process_queued_ios(struct work_struct *work);
|
||||
static void trigger_event(struct work_struct *work);
|
||||
static void activate_path(struct work_struct *work);
|
||||
static int __pgpath_busy(struct pgpath *pgpath);
|
||||
|
||||
|
||||
/*-----------------------------------------------
|
||||
@ -195,11 +191,9 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
|
||||
m = kzalloc(sizeof(*m), GFP_KERNEL);
|
||||
if (m) {
|
||||
INIT_LIST_HEAD(&m->priority_groups);
|
||||
INIT_LIST_HEAD(&m->queued_ios);
|
||||
spin_lock_init(&m->lock);
|
||||
m->queue_io = 1;
|
||||
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
|
||||
INIT_WORK(&m->process_queued_ios, process_queued_ios);
|
||||
INIT_WORK(&m->trigger_event, trigger_event);
|
||||
init_waitqueue_head(&m->pg_init_wait);
|
||||
mutex_init(&m->work_mutex);
|
||||
@ -256,13 +250,21 @@ static void clear_mapinfo(struct multipath *m, union map_info *info)
|
||||
* Path selection
|
||||
*-----------------------------------------------*/
|
||||
|
||||
static void __pg_init_all_paths(struct multipath *m)
|
||||
static int __pg_init_all_paths(struct multipath *m)
|
||||
{
|
||||
struct pgpath *pgpath;
|
||||
unsigned long pg_init_delay = 0;
|
||||
|
||||
if (m->pg_init_in_progress || m->pg_init_disabled)
|
||||
return 0;
|
||||
|
||||
m->pg_init_count++;
|
||||
m->pg_init_required = 0;
|
||||
|
||||
/* Check here to reset pg_init_required */
|
||||
if (!m->current_pg)
|
||||
return 0;
|
||||
|
||||
if (m->pg_init_delay_retry)
|
||||
pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
|
||||
m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
|
||||
@ -274,6 +276,7 @@ static void __pg_init_all_paths(struct multipath *m)
|
||||
pg_init_delay))
|
||||
m->pg_init_in_progress++;
|
||||
}
|
||||
return m->pg_init_in_progress;
|
||||
}
|
||||
|
||||
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
|
||||
@ -365,19 +368,26 @@ failed:
|
||||
*/
|
||||
static int __must_push_back(struct multipath *m)
|
||||
{
|
||||
return (m->queue_if_no_path != m->saved_queue_if_no_path &&
|
||||
dm_noflush_suspending(m->ti));
|
||||
return (m->queue_if_no_path ||
|
||||
(m->queue_if_no_path != m->saved_queue_if_no_path &&
|
||||
dm_noflush_suspending(m->ti)));
|
||||
}
|
||||
|
||||
static int map_io(struct multipath *m, struct request *clone,
|
||||
union map_info *map_context, unsigned was_queued)
|
||||
#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required)
|
||||
|
||||
/*
|
||||
* Map cloned requests
|
||||
*/
|
||||
static int multipath_map(struct dm_target *ti, struct request *clone,
|
||||
union map_info *map_context)
|
||||
{
|
||||
int r = DM_MAPIO_REMAPPED;
|
||||
struct multipath *m = (struct multipath *) ti->private;
|
||||
int r = DM_MAPIO_REQUEUE;
|
||||
size_t nr_bytes = blk_rq_bytes(clone);
|
||||
unsigned long flags;
|
||||
struct pgpath *pgpath;
|
||||
struct block_device *bdev;
|
||||
struct dm_mpath_io *mpio = map_context->ptr;
|
||||
struct dm_mpath_io *mpio;
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
|
||||
@ -388,38 +398,33 @@ static int map_io(struct multipath *m, struct request *clone,
|
||||
|
||||
pgpath = m->current_pgpath;
|
||||
|
||||
if (was_queued)
|
||||
m->queue_size--;
|
||||
|
||||
if (m->pg_init_required) {
|
||||
if (!m->pg_init_in_progress)
|
||||
queue_work(kmultipathd, &m->process_queued_ios);
|
||||
r = DM_MAPIO_REQUEUE;
|
||||
} else if ((pgpath && m->queue_io) ||
|
||||
(!pgpath && m->queue_if_no_path)) {
|
||||
/* Queue for the daemon to resubmit */
|
||||
list_add_tail(&clone->queuelist, &m->queued_ios);
|
||||
m->queue_size++;
|
||||
if (!m->queue_io)
|
||||
queue_work(kmultipathd, &m->process_queued_ios);
|
||||
pgpath = NULL;
|
||||
r = DM_MAPIO_SUBMITTED;
|
||||
} else if (pgpath) {
|
||||
bdev = pgpath->path.dev->bdev;
|
||||
clone->q = bdev_get_queue(bdev);
|
||||
clone->rq_disk = bdev->bd_disk;
|
||||
} else if (__must_push_back(m))
|
||||
r = DM_MAPIO_REQUEUE;
|
||||
else
|
||||
r = -EIO; /* Failed */
|
||||
if (!pgpath) {
|
||||
if (!__must_push_back(m))
|
||||
r = -EIO; /* Failed */
|
||||
goto out_unlock;
|
||||
}
|
||||
if (!pg_ready(m)) {
|
||||
__pg_init_all_paths(m);
|
||||
goto out_unlock;
|
||||
}
|
||||
if (set_mapinfo(m, map_context) < 0)
|
||||
/* ENOMEM, requeue */
|
||||
goto out_unlock;
|
||||
|
||||
bdev = pgpath->path.dev->bdev;
|
||||
clone->q = bdev_get_queue(bdev);
|
||||
clone->rq_disk = bdev->bd_disk;
|
||||
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
|
||||
mpio = map_context->ptr;
|
||||
mpio->pgpath = pgpath;
|
||||
mpio->nr_bytes = nr_bytes;
|
||||
|
||||
if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
|
||||
pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
|
||||
if (pgpath->pg->ps.type->start_io)
|
||||
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
|
||||
&pgpath->path,
|
||||
nr_bytes);
|
||||
r = DM_MAPIO_REMAPPED;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
|
||||
return r;
|
||||
@ -440,76 +445,14 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
|
||||
else
|
||||
m->saved_queue_if_no_path = queue_if_no_path;
|
||||
m->queue_if_no_path = queue_if_no_path;
|
||||
if (!m->queue_if_no_path && m->queue_size)
|
||||
queue_work(kmultipathd, &m->process_queued_ios);
|
||||
if (!m->queue_if_no_path)
|
||||
dm_table_run_md_queue_async(m->ti->table);
|
||||
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*-----------------------------------------------------------------
|
||||
* The multipath daemon is responsible for resubmitting queued ios.
|
||||
*---------------------------------------------------------------*/
|
||||
|
||||
static void dispatch_queued_ios(struct multipath *m)
|
||||
{
|
||||
int r;
|
||||
unsigned long flags;
|
||||
union map_info *info;
|
||||
struct request *clone, *n;
|
||||
LIST_HEAD(cl);
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
list_splice_init(&m->queued_ios, &cl);
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
|
||||
list_for_each_entry_safe(clone, n, &cl, queuelist) {
|
||||
list_del_init(&clone->queuelist);
|
||||
|
||||
info = dm_get_rq_mapinfo(clone);
|
||||
|
||||
r = map_io(m, clone, info, 1);
|
||||
if (r < 0) {
|
||||
clear_mapinfo(m, info);
|
||||
dm_kill_unmapped_request(clone, r);
|
||||
} else if (r == DM_MAPIO_REMAPPED)
|
||||
dm_dispatch_request(clone);
|
||||
else if (r == DM_MAPIO_REQUEUE) {
|
||||
clear_mapinfo(m, info);
|
||||
dm_requeue_unmapped_request(clone);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void process_queued_ios(struct work_struct *work)
|
||||
{
|
||||
struct multipath *m =
|
||||
container_of(work, struct multipath, process_queued_ios);
|
||||
struct pgpath *pgpath = NULL;
|
||||
unsigned must_queue = 1;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
|
||||
if (!m->current_pgpath)
|
||||
__choose_pgpath(m, 0);
|
||||
|
||||
pgpath = m->current_pgpath;
|
||||
|
||||
if ((pgpath && !m->queue_io) ||
|
||||
(!pgpath && !m->queue_if_no_path))
|
||||
must_queue = 0;
|
||||
|
||||
if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
|
||||
!m->pg_init_disabled)
|
||||
__pg_init_all_paths(m);
|
||||
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
if (!must_queue)
|
||||
dispatch_queued_ios(m);
|
||||
}
|
||||
|
||||
/*
|
||||
* An event is triggered whenever a path is taken out of use.
|
||||
* Includes path failure and PG bypass.
|
||||
@ -971,27 +914,6 @@ static void multipath_dtr(struct dm_target *ti)
|
||||
free_multipath(m);
|
||||
}
|
||||
|
||||
/*
|
||||
* Map cloned requests
|
||||
*/
|
||||
static int multipath_map(struct dm_target *ti, struct request *clone,
|
||||
union map_info *map_context)
|
||||
{
|
||||
int r;
|
||||
struct multipath *m = (struct multipath *) ti->private;
|
||||
|
||||
if (set_mapinfo(m, map_context) < 0)
|
||||
/* ENOMEM, requeue */
|
||||
return DM_MAPIO_REQUEUE;
|
||||
|
||||
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
|
||||
r = map_io(m, clone, map_context, 0);
|
||||
if (r < 0 || r == DM_MAPIO_REQUEUE)
|
||||
clear_mapinfo(m, map_context);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
* Take a path out of use.
|
||||
*/
|
||||
@ -1054,9 +976,9 @@ static int reinstate_path(struct pgpath *pgpath)
|
||||
|
||||
pgpath->is_active = 1;
|
||||
|
||||
if (!m->nr_valid_paths++ && m->queue_size) {
|
||||
if (!m->nr_valid_paths++) {
|
||||
m->current_pgpath = NULL;
|
||||
queue_work(kmultipathd, &m->process_queued_ios);
|
||||
dm_table_run_md_queue_async(m->ti->table);
|
||||
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
|
||||
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
|
||||
m->pg_init_in_progress++;
|
||||
@ -1252,11 +1174,12 @@ static void pg_init_done(void *data, int errors)
|
||||
/* Activations of other paths are still on going */
|
||||
goto out;
|
||||
|
||||
if (!m->pg_init_required)
|
||||
m->queue_io = 0;
|
||||
|
||||
m->pg_init_delay_retry = delay_retry;
|
||||
queue_work(kmultipathd, &m->process_queued_ios);
|
||||
if (m->pg_init_required) {
|
||||
m->pg_init_delay_retry = delay_retry;
|
||||
if (__pg_init_all_paths(m))
|
||||
goto out;
|
||||
}
|
||||
m->queue_io = 0;
|
||||
|
||||
/*
|
||||
* Wake up any thread waiting to suspend.
|
||||
@ -1272,8 +1195,11 @@ static void activate_path(struct work_struct *work)
|
||||
struct pgpath *pgpath =
|
||||
container_of(work, struct pgpath, activate_path.work);
|
||||
|
||||
scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
|
||||
pg_init_done, pgpath);
|
||||
if (pgpath->is_active)
|
||||
scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
|
||||
pg_init_done, pgpath);
|
||||
else
|
||||
pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
|
||||
}
|
||||
|
||||
static int noretry_error(int error)
|
||||
@ -1433,7 +1359,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
|
||||
|
||||
/* Features */
|
||||
if (type == STATUSTYPE_INFO)
|
||||
DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
|
||||
DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
|
||||
else {
|
||||
DMEMIT("%u ", m->queue_if_no_path +
|
||||
(m->pg_init_retries > 0) * 2 +
|
||||
@ -1552,7 +1478,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
|
||||
}
|
||||
|
||||
if (argc != 2) {
|
||||
DMWARN("Unrecognised multipath message received.");
|
||||
DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -1570,7 +1496,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
|
||||
else if (!strcasecmp(argv[0], "fail_path"))
|
||||
action = fail_path;
|
||||
else {
|
||||
DMWARN("Unrecognised multipath message received.");
|
||||
DMWARN("Unrecognised multipath message received: %s", argv[0]);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -1632,8 +1558,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
|
||||
r = err;
|
||||
}
|
||||
|
||||
if (r == -ENOTCONN && !fatal_signal_pending(current))
|
||||
queue_work(kmultipathd, &m->process_queued_ios);
|
||||
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
if (!m->current_pg) {
|
||||
/* Path status changed, redo selection */
|
||||
__choose_pgpath(m, 0);
|
||||
}
|
||||
if (m->pg_init_required)
|
||||
__pg_init_all_paths(m);
|
||||
spin_unlock_irqrestore(&m->lock, flags);
|
||||
dm_table_run_md_queue_async(m->ti->table);
|
||||
}
|
||||
|
||||
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
|
||||
}
|
||||
@ -1684,7 +1619,7 @@ static int multipath_busy(struct dm_target *ti)
|
||||
spin_lock_irqsave(&m->lock, flags);
|
||||
|
||||
/* pg_init in progress, requeue until done */
|
||||
if (m->pg_init_in_progress) {
|
||||
if (!pg_ready(m)) {
|
||||
busy = 1;
|
||||
goto out;
|
||||
}
|
||||
@ -1737,7 +1672,7 @@ out:
|
||||
*---------------------------------------------------------------*/
|
||||
static struct target_type multipath_target = {
|
||||
.name = "multipath",
|
||||
.version = {1, 6, 0},
|
||||
.version = {1, 7, 0},
|
||||
.module = THIS_MODULE,
|
||||
.ctr = multipath_ctr,
|
||||
.dtr = multipath_dtr,
|
||||
|
@ -945,7 +945,7 @@ bool dm_table_request_based(struct dm_table *t)
|
||||
return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
|
||||
}
|
||||
|
||||
int dm_table_alloc_md_mempools(struct dm_table *t)
|
||||
static int dm_table_alloc_md_mempools(struct dm_table *t)
|
||||
{
|
||||
unsigned type = dm_table_get_type(t);
|
||||
unsigned per_bio_data_size = 0;
|
||||
@ -1618,6 +1618,25 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
|
||||
}
|
||||
EXPORT_SYMBOL(dm_table_get_md);
|
||||
|
||||
void dm_table_run_md_queue_async(struct dm_table *t)
|
||||
{
|
||||
struct mapped_device *md;
|
||||
struct request_queue *queue;
|
||||
unsigned long flags;
|
||||
|
||||
if (!dm_table_request_based(t))
|
||||
return;
|
||||
|
||||
md = dm_table_get_md(t);
|
||||
queue = dm_get_md_queue(md);
|
||||
if (queue) {
|
||||
spin_lock_irqsave(queue->queue_lock, flags);
|
||||
blk_run_queue_async(queue);
|
||||
spin_unlock_irqrestore(queue->queue_lock, flags);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(dm_table_run_md_queue_async);
|
||||
|
||||
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
|
||||
sector_t start, sector_t len, void *data)
|
||||
{
|
||||
|
@ -192,6 +192,13 @@ struct dm_pool_metadata {
|
||||
* operation possible in this state is the closing of the device.
|
||||
*/
|
||||
bool fail_io:1;
|
||||
|
||||
/*
|
||||
* Reading the space map roots can fail, so we read it into these
|
||||
* buffers before the superblock is locked and updated.
|
||||
*/
|
||||
__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
|
||||
__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
|
||||
};
|
||||
|
||||
struct dm_thin_device {
|
||||
@ -431,29 +438,56 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
|
||||
pmd->details_info.value_type.equal = NULL;
|
||||
}
|
||||
|
||||
static int save_sm_roots(struct dm_pool_metadata *pmd)
|
||||
{
|
||||
int r;
|
||||
size_t len;
|
||||
|
||||
r = dm_sm_root_size(pmd->metadata_sm, &len);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = dm_sm_root_size(pmd->data_sm, &len);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
|
||||
}
|
||||
|
||||
static void copy_sm_roots(struct dm_pool_metadata *pmd,
|
||||
struct thin_disk_superblock *disk)
|
||||
{
|
||||
memcpy(&disk->metadata_space_map_root,
|
||||
&pmd->metadata_space_map_root,
|
||||
sizeof(pmd->metadata_space_map_root));
|
||||
|
||||
memcpy(&disk->data_space_map_root,
|
||||
&pmd->data_space_map_root,
|
||||
sizeof(pmd->data_space_map_root));
|
||||
}
|
||||
|
||||
static int __write_initial_superblock(struct dm_pool_metadata *pmd)
|
||||
{
|
||||
int r;
|
||||
struct dm_block *sblock;
|
||||
size_t metadata_len, data_len;
|
||||
struct thin_disk_superblock *disk_super;
|
||||
sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
|
||||
|
||||
if (bdev_size > THIN_METADATA_MAX_SECTORS)
|
||||
bdev_size = THIN_METADATA_MAX_SECTORS;
|
||||
|
||||
r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = dm_sm_root_size(pmd->data_sm, &data_len);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = dm_sm_commit(pmd->data_sm);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = save_sm_roots(pmd);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = dm_tm_pre_commit(pmd->tm);
|
||||
if (r < 0)
|
||||
return r;
|
||||
@ -471,15 +505,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
|
||||
disk_super->trans_id = 0;
|
||||
disk_super->held_root = 0;
|
||||
|
||||
r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
|
||||
metadata_len);
|
||||
if (r < 0)
|
||||
goto bad_locked;
|
||||
|
||||
r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
|
||||
data_len);
|
||||
if (r < 0)
|
||||
goto bad_locked;
|
||||
copy_sm_roots(pmd, disk_super);
|
||||
|
||||
disk_super->data_mapping_root = cpu_to_le64(pmd->root);
|
||||
disk_super->device_details_root = cpu_to_le64(pmd->details_root);
|
||||
@ -488,10 +514,6 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
|
||||
disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
|
||||
|
||||
return dm_tm_commit(pmd->tm, sblock);
|
||||
|
||||
bad_locked:
|
||||
dm_bm_unlock(sblock);
|
||||
return r;
|
||||
}
|
||||
|
||||
static int __format_metadata(struct dm_pool_metadata *pmd)
|
||||
@ -769,6 +791,10 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = save_sm_roots(pmd);
|
||||
if (r < 0)
|
||||
return r;
|
||||
|
||||
r = superblock_lock(pmd, &sblock);
|
||||
if (r)
|
||||
return r;
|
||||
@ -780,21 +806,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
|
||||
disk_super->trans_id = cpu_to_le64(pmd->trans_id);
|
||||
disk_super->flags = cpu_to_le32(pmd->flags);
|
||||
|
||||
r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
|
||||
metadata_len);
|
||||
if (r < 0)
|
||||
goto out_locked;
|
||||
|
||||
r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
|
||||
data_len);
|
||||
if (r < 0)
|
||||
goto out_locked;
|
||||
copy_sm_roots(pmd, disk_super);
|
||||
|
||||
return dm_tm_commit(pmd->tm, sblock);
|
||||
|
||||
out_locked:
|
||||
dm_bm_unlock(sblock);
|
||||
return r;
|
||||
}
|
||||
|
||||
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
|
||||
|
@ -12,9 +12,11 @@
|
||||
#include <linux/dm-io.h>
|
||||
#include <linux/dm-kcopyd.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/rbtree.h>
|
||||
|
||||
#define DM_MSG_PREFIX "thin"
|
||||
|
||||
@ -178,12 +180,10 @@ struct pool {
|
||||
unsigned ref_count;
|
||||
|
||||
spinlock_t lock;
|
||||
struct bio_list deferred_bios;
|
||||
struct bio_list deferred_flush_bios;
|
||||
struct list_head prepared_mappings;
|
||||
struct list_head prepared_discards;
|
||||
|
||||
struct bio_list retry_on_resume_list;
|
||||
struct list_head active_thins;
|
||||
|
||||
struct dm_deferred_set *shared_read_ds;
|
||||
struct dm_deferred_set *all_io_ds;
|
||||
@ -220,6 +220,7 @@ struct pool_c {
|
||||
* Target context for a thin.
|
||||
*/
|
||||
struct thin_c {
|
||||
struct list_head list;
|
||||
struct dm_dev *pool_dev;
|
||||
struct dm_dev *origin_dev;
|
||||
dm_thin_id dev_id;
|
||||
@ -227,6 +228,10 @@ struct thin_c {
|
||||
struct pool *pool;
|
||||
struct dm_thin_device *td;
|
||||
bool requeue_mode:1;
|
||||
spinlock_t lock;
|
||||
struct bio_list deferred_bio_list;
|
||||
struct bio_list retry_on_resume_list;
|
||||
struct rb_root sort_bio_list; /* sorted list of deferred bios */
|
||||
};
|
||||
|
||||
/*----------------------------------------------------------------*/
|
||||
@ -287,9 +292,9 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc,
|
||||
struct pool *pool = tc->pool;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&pool->lock, flags);
|
||||
dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
|
||||
spin_unlock_irqrestore(&pool->lock, flags);
|
||||
spin_lock_irqsave(&tc->lock, flags);
|
||||
dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
|
||||
spin_unlock_irqrestore(&tc->lock, flags);
|
||||
|
||||
wake_worker(pool);
|
||||
}
|
||||
@ -368,6 +373,7 @@ struct dm_thin_endio_hook {
|
||||
struct dm_deferred_entry *shared_read_entry;
|
||||
struct dm_deferred_entry *all_io_entry;
|
||||
struct dm_thin_new_mapping *overwrite_mapping;
|
||||
struct rb_node rb_node;
|
||||
};
|
||||
|
||||
static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
|
||||
@ -378,30 +384,22 @@ static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
|
||||
|
||||
bio_list_init(&bios);
|
||||
|
||||
spin_lock_irqsave(&tc->pool->lock, flags);
|
||||
spin_lock_irqsave(&tc->lock, flags);
|
||||
bio_list_merge(&bios, master);
|
||||
bio_list_init(master);
|
||||
spin_unlock_irqrestore(&tc->pool->lock, flags);
|
||||
spin_unlock_irqrestore(&tc->lock, flags);
|
||||
|
||||
while ((bio = bio_list_pop(&bios))) {
|
||||
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
|
||||
|
||||
if (h->tc == tc)
|
||||
bio_endio(bio, DM_ENDIO_REQUEUE);
|
||||
else
|
||||
bio_list_add(master, bio);
|
||||
}
|
||||
while ((bio = bio_list_pop(&bios)))
|
||||
bio_endio(bio, DM_ENDIO_REQUEUE);
|
||||
}
|
||||
|
||||
static void requeue_io(struct thin_c *tc)
|
||||
{
|
||||
struct pool *pool = tc->pool;
|
||||
|
||||
requeue_bio_list(tc, &pool->deferred_bios);
|
||||
requeue_bio_list(tc, &pool->retry_on_resume_list);
|
||||
requeue_bio_list(tc, &tc->deferred_bio_list);
|
||||
requeue_bio_list(tc, &tc->retry_on_resume_list);
|
||||
}
|
||||
|
||||
static void error_retry_list(struct pool *pool)
|
||||
static void error_thin_retry_list(struct thin_c *tc)
|
||||
{
|
||||
struct bio *bio;
|
||||
unsigned long flags;
|
||||
@ -409,15 +407,25 @@ static void error_retry_list(struct pool *pool)
|
||||
|
||||
bio_list_init(&bios);
|
||||
|
||||
spin_lock_irqsave(&pool->lock, flags);
|
||||
bio_list_merge(&bios, &pool->retry_on_resume_list);
|
||||
bio_list_init(&pool->retry_on_resume_list);
|
||||
spin_unlock_irqrestore(&pool->lock, flags);
|
||||
spin_lock_irqsave(&tc->lock, flags);
|
||||
bio_list_merge(&bios, &tc->retry_on_resume_list);
|
||||
bio_list_init(&tc->retry_on_resume_list);
|
||||
spin_unlock_irqrestore(&tc->lock, flags);
|
||||
|
||||
while ((bio = bio_list_pop(&bios)))
|
||||
bio_io_error(bio);
|
||||
}
|
||||
|
||||
static void error_retry_list(struct pool *pool)
|
||||
{
|
||||
struct thin_c *tc;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(tc, &pool->active_thins, list)
|
||||
error_thin_retry_list(tc);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/*
|
||||
* This section of code contains the logic for processing a thin device's IO.
|
||||
* Much of the code depends on pool object resources (lists, workqueues, etc)
|
||||
@@ -608,9 +616,9 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->lock, flags);
-	cell_release(pool, cell, &pool->deferred_bios);
-	spin_unlock_irqrestore(&tc->pool->lock, flags);
+	spin_lock_irqsave(&tc->lock, flags);
+	cell_release(pool, cell, &tc->deferred_bio_list);
+	spin_unlock_irqrestore(&tc->lock, flags);
 
 	wake_worker(pool);
 }
@@ -623,9 +631,9 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->lock, flags);
-	cell_release_no_holder(pool, cell, &pool->deferred_bios);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_lock_irqsave(&tc->lock, flags);
+	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
+	spin_unlock_irqrestore(&tc->lock, flags);
 
 	wake_worker(pool);
 }
@@ -1001,12 +1009,11 @@ static void retry_on_resume(struct bio *bio)
 {
 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct thin_c *tc = h->tc;
-	struct pool *pool = tc->pool;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pool->lock, flags);
-	bio_list_add(&pool->retry_on_resume_list, bio);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_lock_irqsave(&tc->lock, flags);
+	bio_list_add(&tc->retry_on_resume_list, bio);
+	spin_unlock_irqrestore(&tc->lock, flags);
 }
 
 static bool should_error_unserviceable_bio(struct pool *pool)
@@ -1363,38 +1370,111 @@ static int need_commit_due_to_time(struct pool *pool)
 	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
 }
 
-static void process_deferred_bios(struct pool *pool)
+#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
+#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
+
+static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
 {
+	struct rb_node **rbp, *parent;
+	struct dm_thin_endio_hook *pbd;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
+
+	rbp = &tc->sort_bio_list.rb_node;
+	parent = NULL;
+	while (*rbp) {
+		parent = *rbp;
+		pbd = thin_pbd(parent);
+
+		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
+			rbp = &(*rbp)->rb_left;
+		else
+			rbp = &(*rbp)->rb_right;
+	}
+
+	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+	rb_link_node(&pbd->rb_node, parent, rbp);
+	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
+}
+
+static void __extract_sorted_bios(struct thin_c *tc)
+{
+	struct rb_node *node;
+	struct dm_thin_endio_hook *pbd;
+	struct bio *bio;
+
+	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
+		pbd = thin_pbd(node);
+		bio = thin_bio(pbd);
+
+		bio_list_add(&tc->deferred_bio_list, bio);
+		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
+	}
+
+	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
+}
+
+static void __sort_thin_deferred_bios(struct thin_c *tc)
+{
-	unsigned long flags;
 	struct bio *bio;
 	struct bio_list bios;
 
 	bio_list_init(&bios);
+	bio_list_merge(&bios, &tc->deferred_bio_list);
+	bio_list_init(&tc->deferred_bio_list);
 
-	spin_lock_irqsave(&pool->lock, flags);
-	bio_list_merge(&bios, &pool->deferred_bios);
-	bio_list_init(&pool->deferred_bios);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	/* Sort deferred_bio_list using rb-tree */
+	while ((bio = bio_list_pop(&bios)))
+		__thin_bio_rb_add(tc, bio);
+
+	/*
+	 * Transfer the sorted bios in sort_bio_list back to
+	 * deferred_bio_list to allow lockless submission of
+	 * all bios.
+	 */
+	__extract_sorted_bios(tc);
+}
+
+static void process_thin_deferred_bios(struct thin_c *tc)
+{
+	struct pool *pool = tc->pool;
+	unsigned long flags;
+	struct bio *bio;
+	struct bio_list bios;
+	struct blk_plug plug;
+
+	if (tc->requeue_mode) {
+		requeue_bio_list(tc, &tc->deferred_bio_list);
+		return;
+	}
+
+	bio_list_init(&bios);
+
+	spin_lock_irqsave(&tc->lock, flags);
+
+	if (bio_list_empty(&tc->deferred_bio_list)) {
+		spin_unlock_irqrestore(&tc->lock, flags);
+		return;
+	}
+
+	__sort_thin_deferred_bios(tc);
+
+	bio_list_merge(&bios, &tc->deferred_bio_list);
+	bio_list_init(&tc->deferred_bio_list);
+
+	spin_unlock_irqrestore(&tc->lock, flags);
 
+	blk_start_plug(&plug);
 	while ((bio = bio_list_pop(&bios))) {
-		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
-		struct thin_c *tc = h->tc;
-
-		if (tc->requeue_mode) {
-			bio_endio(bio, DM_ENDIO_REQUEUE);
-			continue;
-		}
-
 		/*
 		 * If we've got no free new_mapping structs, and processing
 		 * this bio might require one, we pause until there are some
 		 * prepared mappings to process.
 		 */
 		if (ensure_next_mapping(pool)) {
-			spin_lock_irqsave(&pool->lock, flags);
-			bio_list_add(&pool->deferred_bios, bio);
-			bio_list_merge(&pool->deferred_bios, &bios);
-			spin_unlock_irqrestore(&pool->lock, flags);
+			spin_lock_irqsave(&tc->lock, flags);
+			bio_list_add(&tc->deferred_bio_list, bio);
+			bio_list_merge(&tc->deferred_bio_list, &bios);
+			spin_unlock_irqrestore(&tc->lock, flags);
 			break;
 		}
@@ -1403,6 +1483,20 @@ static void process_deferred_bios(struct pool *pool)
 		else
 			pool->process_bio(tc, bio);
 	}
+	blk_finish_plug(&plug);
+}
+
+static void process_deferred_bios(struct pool *pool)
+{
+	unsigned long flags;
+	struct bio *bio;
+	struct bio_list bios;
+	struct thin_c *tc;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tc, &pool->active_thins, list)
+		process_thin_deferred_bios(tc);
+	rcu_read_unlock();
 
 	/*
 	 * If there are any deferred flush bios, we must commit
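
The large hunk above introduces __thin_bio_rb_add() and __extract_sorted_bios(), which keep each thin device's deferred bios in an rb-tree keyed on bi_iter.bi_sector so that process_thin_deferred_bios() submits them in ascending sector order inside a blk_plug. A rough userspace illustration of why sorting before submission improves the on-disk layout, with qsort() standing in for the kernel rb-tree and the pending_bio/submit() names invented for the example:

#include <stdio.h>
#include <stdlib.h>

struct pending_bio {                /* stand-in for a deferred bio */
	unsigned long long sector;  /* like bio->bi_iter.bi_sector */
	unsigned int len;
};

static int by_sector(const void *a, const void *b)
{
	const struct pending_bio *x = a, *y = b;

	if (x->sector < y->sector)
		return -1;
	return x->sector > y->sector;
}

static void submit(const struct pending_bio *b)
{
	printf("write %u sectors at %llu\n", b->len, b->sector);
}

int main(void)
{
	/* Deferred writes arrive in whatever order the issuing threads ran. */
	struct pending_bio pending[] = {
		{ 8192, 8 }, { 16, 8 }, { 4096, 8 }, { 24, 8 }, { 12288, 8 },
	};
	size_t i, n = sizeof(pending) / sizeof(pending[0]);

	/* Sorting before submission turns scattered writes into a mostly
	 * ascending sweep, which is what the rb-tree gives the thin worker. */
	qsort(pending, n, sizeof(pending[0]), by_sector);

	for (i = 0; i < n; i++)
		submit(&pending[i]);
	return 0;
}
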
@@ -1634,9 +1728,9 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
 	unsigned long flags;
 	struct pool *pool = tc->pool;
 
-	spin_lock_irqsave(&pool->lock, flags);
-	bio_list_add(&pool->deferred_bios, bio);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_lock_irqsave(&tc->lock, flags);
+	bio_list_add(&tc->deferred_bio_list, bio);
+	spin_unlock_irqrestore(&tc->lock, flags);
 
 	wake_worker(pool);
 }
@@ -1757,26 +1851,29 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 
 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
 {
-	int r;
-	unsigned long flags;
 	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
+	struct request_queue *q;
 
-	spin_lock_irqsave(&pt->pool->lock, flags);
-	r = !bio_list_empty(&pt->pool->retry_on_resume_list);
-	spin_unlock_irqrestore(&pt->pool->lock, flags);
+	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
+		return 1;
 
-	if (!r) {
-		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-		r = bdi_congested(&q->backing_dev_info, bdi_bits);
-	}
-
-	return r;
+	q = bdev_get_queue(pt->data_dev->bdev);
+	return bdi_congested(&q->backing_dev_info, bdi_bits);
 }
 
-static void __requeue_bios(struct pool *pool)
+static void requeue_bios(struct pool *pool)
 {
-	bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
-	bio_list_init(&pool->retry_on_resume_list);
+	unsigned long flags;
+	struct thin_c *tc;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
+		spin_lock_irqsave(&tc->lock, flags);
+		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
+		bio_list_init(&tc->retry_on_resume_list);
+		spin_unlock_irqrestore(&tc->lock, flags);
+	}
+	rcu_read_unlock();
 }
 
 /*----------------------------------------------------------------
@@ -1957,12 +2054,11 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	INIT_WORK(&pool->worker, do_worker);
 	INIT_DELAYED_WORK(&pool->waker, do_waker);
 	spin_lock_init(&pool->lock);
-	bio_list_init(&pool->deferred_bios);
 	bio_list_init(&pool->deferred_flush_bios);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
 	INIT_LIST_HEAD(&pool->prepared_discards);
+	INIT_LIST_HEAD(&pool->active_thins);
 	pool->low_water_triggered = false;
-	bio_list_init(&pool->retry_on_resume_list);
 
 	pool->shared_read_ds = dm_deferred_set_create();
 	if (!pool->shared_read_ds) {
@@ -2507,8 +2603,8 @@ static void pool_resume(struct dm_target *ti)
 
 	spin_lock_irqsave(&pool->lock, flags);
 	pool->low_water_triggered = false;
-	__requeue_bios(pool);
 	spin_unlock_irqrestore(&pool->lock, flags);
+	requeue_bios(pool);
 
 	do_waker(&pool->waker.work);
 }
@@ -2947,7 +3043,7 @@ static struct target_type pool_target = {
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 11, 0},
+	.version = {1, 12, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -2968,6 +3064,12 @@ static struct target_type pool_target = {
 static void thin_dtr(struct dm_target *ti)
 {
 	struct thin_c *tc = ti->private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tc->pool->lock, flags);
+	list_del_rcu(&tc->list);
+	spin_unlock_irqrestore(&tc->pool->lock, flags);
+	synchronize_rcu();
 
 	mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -3014,6 +3116,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		r = -ENOMEM;
 		goto out_unlock;
 	}
+	spin_lock_init(&tc->lock);
+	bio_list_init(&tc->deferred_bio_list);
+	bio_list_init(&tc->retry_on_resume_list);
+	tc->sort_bio_list = RB_ROOT;
 
 	if (argc == 3) {
 		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
@@ -3085,6 +3191,17 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
+	spin_lock(&tc->pool->lock);
+	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
+	spin_unlock(&tc->pool->lock);
+	/*
+	 * This synchronize_rcu() call is needed here otherwise we risk a
+	 * wake_worker() call finding no bios to process (because the newly
+	 * added tc isn't yet visible). So this reduces latency since we
+	 * aren't then dependent on the periodic commit to wake_worker().
+	 */
+	synchronize_rcu();
+
 	return 0;
 
 bad_target_max_io_len:
@@ -3250,7 +3367,7 @@ static int thin_iterate_devices(struct dm_target *ti,
 
 static struct target_type thin_target = {
	.name = "thin",
-	.version = {1, 11, 0},
+	.version = {1, 12, 0},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
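
In the hunks above, thin_ctr() publishes the new thin device on pool->active_thins with list_add_tail_rcu() under the pool lock, and thin_dtr() unlinks it with list_del_rcu() followed by synchronize_rcu(), so the workers' list_for_each_entry_rcu() walks never observe a half-constructed entry. A tiny C11 sketch of the publication half only, with a release/acquire pointer standing in for the RCU list publish (the grace-period wait is not modelled, and all names here are invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct thin_demo {                 /* stand-in for struct thin_c */
	int dev_id;
	long deferred_count;
};

static _Atomic(struct thin_demo *) active_thin;   /* one-slot "list" */

static void *worker(void *arg)
{
	/* Acquire pairs with the release below: if the worker sees the
	 * pointer, it also sees the fully initialised fields behind it. */
	struct thin_demo *tc = atomic_load_explicit(&active_thin,
						    memory_order_acquire);
	if (tc)
		printf("worker sees dev %d (%ld deferred)\n",
		       tc->dev_id, tc->deferred_count);
	else
		printf("worker sees no device yet\n");
	return NULL;
}

int main(void)
{
	static struct thin_demo tc = { .dev_id = 1, .deferred_count = 0 };
	pthread_t t;

	/* Initialise first, then publish: a reader never observes a
	 * half-constructed device, which is the guarantee the real code
	 * gets from list_add_tail_rcu() on pool->active_thins. */
	atomic_store_explicit(&active_thin, &tc, memory_order_release);

	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}
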
@@ -94,13 +94,6 @@ struct dm_rq_clone_bio_info {
 	struct bio clone;
 };
 
-union map_info *dm_get_mapinfo(struct bio *bio)
-{
-	if (bio && bio->bi_private)
-		return &((struct dm_target_io *)bio->bi_private)->info;
-	return NULL;
-}
-
 union map_info *dm_get_rq_mapinfo(struct request *rq)
 {
 	if (rq && rq->end_io_data)
@@ -475,6 +468,11 @@ sector_t dm_get_size(struct mapped_device *md)
 	return get_capacity(md->disk);
 }
 
+struct request_queue *dm_get_md_queue(struct mapped_device *md)
+{
+	return md->queue;
+}
+
 struct dm_stats *dm_get_stats(struct mapped_device *md)
 {
 	return &md->stats;
@@ -760,7 +758,7 @@ static void dec_pending(struct dm_io *io, int error)
 static void clone_endio(struct bio *bio, int error)
 {
 	int r = 0;
-	struct dm_target_io *tio = bio->bi_private;
+	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
@@ -794,7 +792,8 @@ static void clone_endio(struct bio *bio, int error)
  */
 static void end_clone_bio(struct bio *clone, int error)
 {
-	struct dm_rq_clone_bio_info *info = clone->bi_private;
+	struct dm_rq_clone_bio_info *info =
+		container_of(clone, struct dm_rq_clone_bio_info, clone);
 	struct dm_rq_target_io *tio = info->tio;
 	struct bio *bio = info->orig;
 	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
@@ -1120,7 +1119,6 @@ static void __map_bio(struct dm_target_io *tio)
 	struct dm_target *ti = tio->ti;
 
 	clone->bi_end_io = clone_endio;
-	clone->bi_private = tio;
 
 	/*
 	 * Map the clone.  If r == 0 we don't need to do
@@ -1195,7 +1193,6 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 
 	tio->io = ci->io;
 	tio->ti = ti;
-	memset(&tio->info, 0, sizeof(tio->info));
 	tio->target_bio_nr = target_bio_nr;
 
 	return tio;
@@ -1530,7 +1527,6 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
 	info->orig = bio_orig;
 	info->tio = tio;
 	bio->bi_end_io = end_clone_bio;
-	bio->bi_private = info;
 
 	return 0;
 }
@@ -2172,7 +2168,7 @@ static struct dm_table *__unbind(struct mapped_device *md)
 		return NULL;
 
 	dm_table_event_callback(map, NULL, NULL);
-	rcu_assign_pointer(md->map, NULL);
+	RCU_INIT_POINTER(md->map, NULL);
 	dm_sync_table(md);
 
 	return map;
@@ -2873,8 +2869,6 @@ static const struct block_device_operations dm_blk_dops = {
 	.owner = THIS_MODULE
 };
 
-EXPORT_SYMBOL(dm_get_mapinfo);
-
 /*
  * module hooks
 */
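
The hunks above (apparently from the DM core, drivers/md/dm.c) stop stashing per-bio state in bio->bi_private: clone_endio() and end_clone_bio() now recover their owning structure with container_of() on the embedded struct bio clone member, which is also why dm_get_mapinfo() and the map_info initialisation disappear. A minimal standalone sketch of that pattern, with the *_demo types invented for the example:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: recover the containing structure
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bio_demo {               /* stand-in for struct bio */
	unsigned long long sector;
};

struct clone_info_demo {        /* stand-in for dm_rq_clone_bio_info */
	int target_id;
	struct bio_demo clone;  /* the bio is embedded, not pointed to */
};

/* The end-io path only gets the bio pointer, yet can find its owner. */
static void end_io(struct bio_demo *bio)
{
	struct clone_info_demo *info =
		container_of(bio, struct clone_info_demo, clone);

	printf("bio at %llu belongs to target %d\n",
	       bio->sector, info->target_id);
}

int main(void)
{
	struct clone_info_demo info = {
		.target_id = 3,
		.clone = { .sector = 1024 },
	};

	end_io(&info.clone);    /* no back-pointer stored in the bio */
	return 0;
}
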
@@ -73,7 +73,6 @@ unsigned dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 bool dm_table_supports_discards(struct dm_table *t);
-int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
@@ -189,6 +188,7 @@ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only
 int dm_cancel_deferred_remove(struct mapped_device *md);
 int dm_request_based(struct mapped_device *md);
 sector_t dm_get_size(struct mapped_device *md);
+struct request_queue *dm_get_md_queue(struct mapped_device *md);
 struct dm_stats *dm_get_stats(struct mapped_device *md);
 
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
@@ -65,7 +65,7 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
 	int r;
 	__le64 value;
 
-	if (!info->current_index_set)
+	if (!info->current_index_set || !info->dirty)
 		return 0;
 
 	value = cpu_to_le64(info->current_bits);
@@ -77,6 +77,8 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
 		return r;
 
 	info->current_index_set = false;
+	info->dirty = false;
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_bitset_flush);
@@ -94,6 +96,8 @@ static int read_bits(struct dm_disk_bitset *info, dm_block_t root,
 	info->current_bits = le64_to_cpu(value);
 	info->current_index_set = true;
 	info->current_index = array_index;
+	info->dirty = false;
 
 	return 0;
 }
 
@@ -126,6 +130,8 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
 		return r;
 
 	set_bit(b, (unsigned long *) &info->current_bits);
+	info->dirty = true;
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_bitset_set_bit);
@@ -141,6 +147,8 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
 		return r;
 
 	clear_bit(b, (unsigned long *) &info->current_bits);
+	info->dirty = true;
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_bitset_clear_bit);
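
The dm-bitset hunks above add a dirty flag so that dm_bitset_flush() becomes a no-op when the cached 64-bit word has not been modified, while set/clear mark it dirty. A small userspace sketch of the same write-back caching idea (cached_word and flush_word() are invented names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cached_word {
	uint64_t bits;       /* cached copy, like info->current_bits */
	bool loaded;         /* like info->current_index_set */
	bool dirty;          /* new: only write back if something changed */
};

static void set_bit64(struct cached_word *w, unsigned b)
{
	w->bits |= (uint64_t)1 << b;
	w->dirty = true;
}

static void clear_bit64(struct cached_word *w, unsigned b)
{
	w->bits &= ~((uint64_t)1 << b);
	w->dirty = true;
}

/* Flush is now cheap when nothing was modified. */
static void flush_word(struct cached_word *w)
{
	if (!w->loaded || !w->dirty) {
		printf("flush: nothing to do\n");
		return;
	}
	printf("flush: writing back 0x%016llx\n",
	       (unsigned long long)w->bits);
	w->dirty = false;
}

int main(void)
{
	struct cached_word w = { .bits = 0, .loaded = true, .dirty = false };

	flush_word(&w);        /* clean: skipped */
	set_bit64(&w, 5);
	clear_bit64(&w, 1);
	flush_word(&w);        /* dirty: written back once */
	flush_word(&w);        /* clean again: skipped */
	return 0;
}
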
@@ -71,6 +71,7 @@ struct dm_disk_bitset {
 	uint64_t current_bits;
 
 	bool current_index_set:1;
+	bool dirty:1;
 };
 
 /*
@@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b)
 }
 EXPORT_SYMBOL_GPL(dm_bm_unlock);
 
-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
-			   struct dm_block *superblock)
+int dm_bm_flush(struct dm_block_manager *bm)
 {
-	int r;
-
 	if (bm->read_only)
 		return -EPERM;
 
-	r = dm_bufio_write_dirty_buffers(bm->bufio);
-	if (unlikely(r)) {
-		dm_bm_unlock(superblock);
-		return r;
-	}
-
-	dm_bm_unlock(superblock);
-
 	return dm_bufio_write_dirty_buffers(bm->bufio);
 }
-EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
+EXPORT_SYMBOL_GPL(dm_bm_flush);
 
 void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
 {
@@ -105,8 +105,7 @@ int dm_bm_unlock(struct dm_block *b);
 *
 * This method always blocks.
 */
-int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
-			   struct dm_block *superblock);
+int dm_bm_flush(struct dm_block_manager *bm);
 
 /*
 * Request data is prefetched into the cache.
@@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transaction_manager *tm)
 	if (r < 0)
 		return r;
 
-	return 0;
+	return dm_bm_flush(tm->bm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
 
@@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
 		return -EWOULDBLOCK;
 
 	wipe_shadow_table(tm);
+	dm_bm_unlock(root);
 
-	return dm_bm_flush_and_unlock(tm->bm, root);
+	return dm_bm_flush(tm->bm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_commit);
 
@@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transac
 /*
 * We use a 2-phase commit here.
 *
- * i) In the first phase the block manager is told to start flushing, and
- * the changes to the space map are written to disk.  You should interrogate
- * your particular space map to get detail of its root node etc. to be
- * included in your superblock.
+ * i) Make all changes for the transaction *except* for the superblock.
+ * Then call dm_tm_pre_commit() to flush them to disk.
 *
- * ii) @root will be committed last.  You shouldn't use more than the
- * first 512 bytes of @root if you wish the transaction to survive a power
- * failure.  You *must* have a write lock held on @root for both stage (i)
- * and (ii).  The commit will drop the write lock.
+ * ii) Lock your superblock.  Update.  Then call dm_tm_commit() which will
+ * unlock the superblock and flush it.  No other blocks should be updated
+ * during this period.  Care should be taken to never unlock a partially
+ * updated superblock; perform any operations that could fail *before* you
+ * take the superblock lock.
 */
 int dm_tm_pre_commit(struct dm_transaction_manager *tm);
-int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root);
+int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock);
 
 /*
 * These methods are the only way to get hold of a writeable block.
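
The rewritten comment above describes the corrected two-phase commit: everything except the superblock is written and flushed first (dm_tm_pre_commit(), now backed by dm_bm_flush()), and only then is the locked superblock updated, unlocked and flushed by dm_tm_commit(), so a crash can never leave a superblock referencing metadata that was not yet on disk. A rough userspace analogue of that ordering using plain POSIX I/O (the file layout, block numbers and function names are invented for the example):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define BLOCK_SIZE   4096
#define SUPERBLOCK   0          /* block 0 plays the superblock */

/* Phase 1: write and flush everything except the superblock. */
static int pre_commit(int fd, const void *metadata, size_t len, off_t block)
{
	if (pwrite(fd, metadata, len, block * BLOCK_SIZE) != (ssize_t)len)
		return -1;
	return fsync(fd);       /* like dm_tm_pre_commit() -> dm_bm_flush() */
}

/* Phase 2: only now update the superblock, then flush again. */
static int commit(int fd, uint64_t new_root_block)
{
	char sb[BLOCK_SIZE];

	memset(sb, 0, sizeof(sb));
	memcpy(sb, &new_root_block, sizeof(new_root_block));
	if (pwrite(fd, sb, sizeof(sb), SUPERBLOCK * BLOCK_SIZE) !=
	    (ssize_t)sizeof(sb))
		return -1;
	return fsync(fd);       /* like dm_tm_commit() -> dm_bm_flush() */
}

int main(void)
{
	char node[BLOCK_SIZE] = "new btree node";
	int fd = open("metadata.img", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	if (pre_commit(fd, node, sizeof(node), 7) ||  /* block 7: new metadata */
	    commit(fd, 7))                            /* superblock updated last */
		perror("commit");
	close(fd);
	return 0;
}

The only property being illustrated is the ordering: the block the superblock points at is durable before the superblock itself is rewritten.
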
@@ -23,7 +23,6 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
 
 union map_info {
 	void *ptr;
-	unsigned long long ll;
 };
 
 /*
@@ -291,7 +290,6 @@ struct dm_target_callbacks {
 struct dm_target_io {
 	struct dm_io *io;
 	struct dm_target *ti;
-	union map_info info;
 	unsigned target_bio_nr;
 	struct bio clone;
 };
@@ -403,7 +401,6 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
 struct gendisk *dm_disk(struct mapped_device *md);
 int dm_suspended(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
-union map_info *dm_get_mapinfo(struct bio *bio);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
@@ -465,6 +462,11 @@ struct mapped_device *dm_table_get_md(struct dm_table *t);
 */
 void dm_table_event(struct dm_table *t);
 
+/*
+ * Run the queue for request-based targets.
+ */
+void dm_table_run_md_queue_async(struct dm_table *t);
+
 /*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.