Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-25 03:55:09 +08:00
3ea7daa5d7
A 'near' or 'offset' layout RAID10 array can be reshaped to a different 'near' or 'offset' layout, a different chunk size, and a different number of devices. However, the number of copies cannot change.

Unlike RAID5/6, we do not support having user-space backup data that is being relocated during a 'critical section'. Rather, the data_offset of each device must change so that when writing any block to a new location, it will not over-write any data that is still 'live'. This means that RAID10 reshape is not supportable on v0.90 metadata.

The difference between the old data_offset and the new data_offset must be at least the larger of the chunk size multiplied by the number of offset copies, for each of the old and new layouts (for 'near' mode, offset_copies == 1).

A larger difference of around 64M seems useful for in-place reshapes, as more data can be moved between metadata updates. Very large differences (e.g. 512M) seem to slow the process down due to lots of long seeks (on oldish consumer-grade devices at least).

Metadata needs to be updated whenever the place we are about to write to is still considered, by the current metadata, to contain data in the old layout.

[unbalanced locking fix from Dan Carpenter <dan.carpenter@oracle.com>]

Signed-off-by: NeilBrown <neilb@suse.de>
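As a worked example of the minimum-gap rule above, here is a minimal sketch of the required data_offset difference. The helper name and calling convention are made up for illustration; chunk sizes are in sectors, and "offset copies" is taken to mean far_copies for an 'offset' layout and 1 for a 'near' layout, as the message states.

	static sector_t min_data_offset_delta(int old_chunk_sectors, int old_offset_copies,
					      int new_chunk_sectors, int new_offset_copies)
	{
		sector_t old_need = (sector_t)old_chunk_sectors * old_offset_copies;
		sector_t new_need = (sector_t)new_chunk_sectors * new_offset_copies;

		/* e.g. 512K chunks and 'near' layouts on both sides -> 1024 sectors */
		return old_need > new_need ? old_need : new_need;
	}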
163 lines
4.3 KiB
C
#ifndef _RAID10_H
#define _RAID10_H

struct mirror_info {
	struct md_rdev	*rdev, *replacement;
	sector_t	head_position;
	int		recovery_disabled;	/* matches
						 * mddev->recovery_disabled
						 * when we shouldn't try
						 * recovering this device.
						 */
};

struct r10conf {
	struct mddev		*mddev;
	struct mirror_info	*mirrors;
	struct mirror_info	*mirrors_new, *mirrors_old;
	spinlock_t		device_lock;

	/* geometry */
	struct geom {
		int		raid_disks;
		int		near_copies;	/* number of copies laid out
						 * raid0 style */
		int		far_copies;	/* number of copies laid out
						 * at large strides across drives
						 */
		int		far_offset;	/* far_copies are offset by 1
						 * stripe instead of many
						 */
		sector_t	stride;		/* distance between far copies.
						 * This is size / far_copies unless
						 * far_offset, in which case it is
						 * 1 stripe.
						 */
		int		chunk_shift;	/* shift from chunks to sectors */
		sector_t	chunk_mask;
	} prev, geo;
	int			copies;		/* near_copies * far_copies.
						 * must be <= raid_disks
						 */

	sector_t		dev_sectors;	/* temp copy of
						 * mddev->dev_sectors */
	sector_t		reshape_progress;
	sector_t		reshape_safe;
	unsigned long		reshape_checkpoint;
	sector_t		offset_diff;

	struct list_head	retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	spinlock_t		resync_lock;
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	sector_t		next_resync;
	int			fullsync;	/* set to 1 if a full sync is needed,
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */
	int			have_replacement; /* There is at least one
						   * replacement device.
						   */
	wait_queue_head_t	wait_barrier;

	mempool_t		*r10bio_pool;
	mempool_t		*r10buf_pool;
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};
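/*
 * Illustrative sketch, not part of the original header: how the 'geo'
 * fields above locate copy 'copy' of a logical sector in a plain 'near'
 * layout (far_copies == 1).  The helper name is hypothetical; the real
 * mapping code lives in raid10.c.
 */
static inline void near_layout_sketch(struct geom *geo, sector_t sector,
				      int copy, int *disk, sector_t *dev_sector)
{
	sector_t offset = sector & geo->chunk_mask;	   /* offset inside the chunk */
	sector_t chunk	= sector >> geo->chunk_shift;	   /* logical chunk number    */
	sector_t idx	= chunk * geo->near_copies + copy; /* raid0-style slot index  */

	*disk = sector_div(idx, geo->raid_disks);	   /* idx becomes the device chunk */
	*dev_sector = (idx << geo->chunk_shift) + offset;
}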
/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio {
	atomic_t		remaining;	/* 'have we finished' count,
						 * used from IRQ handlers
						 */
	sector_t		sector;		/* virtual sector number */
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_slot;

	struct list_head	retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated.
	 * We sometimes need an extra bio to write to the replacement.
	 */
	struct {
		struct bio	*bio;
		union {
			struct bio	*repl_bio;	/* used for resync and
							 * writes */
			struct md_rdev	*rdev;		/* used for reads
							 * (read_slot >= 0) */
		};
		sector_t	addr;
		int		devnum;
	} devs[0];
};

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
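/*
 * Illustrative sketch, not part of the original header: completion code
 * must treat the two markers above as non-bios before dereferencing a
 * devs[] slot.  The helper name is hypothetical.
 */
static inline int r10bio_slot_is_real_bio(struct r10bio *r10_bio, int slot)
{
	struct bio *bio = r10_bio->devs[slot].bio;

	/* NULL, IO_BLOCKED and IO_MADE_GOOD all fall below the threshold */
	return !BIO_SPECIAL(bio);
}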
/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_IsReshape,
	R10BIO_Degraded,
	/* Set ReadError on bios that experience a read error
	 * so that raid10d knows what to do with them.
	 */
	R10BIO_ReadError,
	/* If a write for this request means we can clear some
	 * known-bad-block records, we set this flag.
	 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
	/* During a reshape we might be performing IO on the
	 * 'previous' part of the array, in which case this
	 * flag is set.
	 */
	R10BIO_Previous,
};

#endif