linux-next/drivers/md/raid1.h
colyli@suse.de 824e47dadd RAID1: avoid unnecessary spin locks in I/O barrier code
When I run a parallel reading performance test on a md raid1 device with
two NVMe SSDs, I observe surprisingly bad throughput: with fio using a 64KB
block size, 40 sequential read I/O jobs and 128 iodepth, overall throughput
is only 2.7GB/s, which is around 50% of the ideal performance number.

Perf reports that the locking contention happens in the allow_barrier() and
wait_barrier() code:
 - 41.41%  fio [kernel.kallsyms]     [k] _raw_spin_lock_irqsave
   - _raw_spin_lock_irqsave
         + 89.92% allow_barrier
         + 9.34% __wake_up
 - 37.30%  fio [kernel.kallsyms]     [k] _raw_spin_lock_irq
   - _raw_spin_lock_irq
         - 100.00% wait_barrier

The reason is that these I/O barrier related functions,
 - raise_barrier()
 - lower_barrier()
 - wait_barrier()
 - allow_barrier()
always hold conf->resync_lock first, even when there are only regular
read I/Os and no resync I/O at all. This is a huge performance penalty.
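
For reference, the pre-patch barrier entry path looked roughly like the
sketch below (a simplified sketch, not the exact original code; in the
pre-patch r1conf, barrier/nr_waiting/nr_pending were plain integers):
every regular I/O had to take conf->resync_lock just to bump or check a
counter, so all I/O threads serialized on that single lock.

static void wait_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		/* a resync/recovery barrier is raised: sleep until it drops */
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock);
		conf->nr_waiting--;
	}
	/* every regular I/O pays for this lock, even with no resync running */
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}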

The solution is a lockless-like algorithm in the I/O barrier code, which
only holds conf->resync_lock when it has to.

The original idea is from Hannes Reinecke, and Neil Brown provided
comments to improve it. I continued to work on it and brought the patch
into its current form.

In the new, simpler raid1 I/O barrier implementation, there are two
wait barrier functions,
 - wait_barrier()
   This calls _wait_barrier() and is used for regular write I/O. If there is
   a resync I/O happening on the same I/O barrier bucket, or the whole
   array is frozen, the task will wait until there is no barrier on the same
   barrier bucket, or the whole array is unfrozen.
 - wait_read_barrier()
   Since regular read I/O won't interfere with resync I/O (read_balance()
   will make sure only uptodate data will be read out), it is unnecessary
   to wait for a barrier in regular read I/Os; waiting is only necessary
   when the whole array is frozen.

The operations on conf->nr_pending[idx], conf->nr_waiting[idx] and
conf->barrier[idx] are very carefully designed in raise_barrier(),
lower_barrier(), _wait_barrier() and wait_read_barrier(), in order to
avoid unnecessary spin locks in these functions. Once
conf->nr_pending[idx] is increased, a resync I/O with the same barrier
bucket index has to wait in raise_barrier(). Then in _wait_barrier(), if no
barrier is raised on the same barrier bucket index and the array is not
frozen, the regular I/O doesn't need to hold conf->resync_lock; it can just
increase conf->nr_pending[idx] and return to its caller. wait_read_barrier()
is very similar to _wait_barrier(); the only difference is that it only waits
when the array is frozen. For heavy parallel reading I/Os, the lockless I/O
barrier code almost entirely gets rid of the spin lock cost.
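
A simplified sketch of the fast/slow path split in _wait_barrier(),
condensed from the description above (not the exact raid1.c code):

static void _wait_barrier(struct r1conf *conf, int idx)
{
	/* Bump nr_pending[idx] first, so a concurrent raise_barrier()
	 * on the same bucket sees it and waits for us.
	 */
	atomic_inc(&conf->nr_pending[idx]);
	smp_mb__after_atomic();

	/* Fast path: no barrier on this bucket and array not frozen,
	 * return without touching conf->resync_lock at all.
	 */
	if (!READ_ONCE(conf->array_frozen) &&
	    !atomic_read(&conf->barrier[idx]))
		return;

	/* Slow path: undo the increment, wake up a possibly waiting
	 * freeze_array(), then wait under conf->resync_lock until the
	 * barrier drops and the array is unfrozen.
	 */
	spin_lock_irq(&conf->resync_lock);
	atomic_inc(&conf->nr_waiting[idx]);
	atomic_dec(&conf->nr_pending[idx]);
	wake_up(&conf->wait_barrier);
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    !atomic_read(&conf->barrier[idx]),
			    conf->resync_lock);
	atomic_inc(&conf->nr_pending[idx]);
	atomic_dec(&conf->nr_waiting[idx]);
	spin_unlock_irq(&conf->resync_lock);
}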

This patch significantly improves raid1 reading performance. In my
testing, on a raid1 device built from two NVMe SSDs, fio with 64KB
block size, 40 sequential read I/O jobs and 128 iodepth sees overall
throughput increase from 2.7GB/s to 4.6GB/s (+70%).

Changelog
V4:
- Change conf->nr_queued[] to atomic_t.
- Define BARRIER_BUCKETS_NR_BITS by (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
V3:
- Add smp_mb__after_atomic() as Shaohua and Neil suggested.
- Change conf->nr_queued[] from atomic_t to int.
- Change conf->array_frozen from atomic_t back to int, and use
  READ_ONCE(conf->array_frozen) to check value of conf->array_frozen
  in _wait_barrier() and wait_read_barrier().
- In _wait_barrier() and wait_read_barrier(), add a call to
  wake_up(&conf->wait_barrier) after atomic_dec(&conf->nr_pending[idx]),
  to fix a deadlock between _wait_barrier()/wait_read_barrier() and
  freeze_array().
V2:
- Remove a spin_lock/unlock pair in raid1d().
 - Add more code comments to explain why there is no race when checking two
   atomic_t variables at the same time.
V1:
- Original RFC patch for comments.

Signed-off-by: Coly Li <colyli@suse.de>
Cc: Shaohua Li <shli@fb.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Guoqing Jiang <gqjiang@suse.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
2017-02-19 22:04:25 -08:00


#ifndef _RAID1_H
#define _RAID1_H
/*
 * each barrier unit size is 64MB for now
 * note: it must be larger than RESYNC_DEPTH
 */
#define BARRIER_UNIT_SECTOR_BITS	17
#define BARRIER_UNIT_SECTOR_SIZE	(1<<17)
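/* 1 << 17 sectors * 512 bytes/sector = 64MB per barrier unit */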
/*
 * In struct r1conf, the following members are related to I/O barrier
 * buckets,
 *	atomic_t	*nr_pending;
 *	atomic_t	*nr_waiting;
 *	atomic_t	*nr_queued;
 *	atomic_t	*barrier;
 * Each of them points to an array of atomic_t variables. Each array is
 * designed to have BARRIER_BUCKETS_NR elements and occupy a single
 * memory page. The data width of an atomic_t variable is 4 bytes, equal
 * to 1<<(ilog2(sizeof(atomic_t))), and BARRIER_BUCKETS_NR_BITS is defined
 * as (PAGE_SHIFT - ilog2(sizeof(atomic_t))) to make sure an array of
 * atomic_t variables with BARRIER_BUCKETS_NR elements exactly
 * occupies a single memory page.
 */
#define BARRIER_BUCKETS_NR_BITS	(PAGE_SHIFT - ilog2(sizeof(atomic_t)))
#define BARRIER_BUCKETS_NR	(1<<BARRIER_BUCKETS_NR_BITS)
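/*
 * For example, with 4KB pages: BARRIER_BUCKETS_NR_BITS = 12 - 2 = 10, so
 * each array holds 1024 atomic_t counters (1024 * 4 bytes = 4KB), exactly
 * one page.
 */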
struct raid1_info {
	struct md_rdev	*rdev;
	sector_t	head_position;

	/* When choosing the best device for a read (read_balance())
	 * we try to keep sequential reads on the same device
	 */
	sector_t	next_seq_sect;
	sector_t	seq_start;
};
/*
 * memory pools need a pointer to the mddev, so they can force an unplug
 * when memory is tight, and a count of the number of drives that the
 * pool was allocated for, so they know how much to allocate and free.
 * mddev->raid_disks cannot be used, as it can change while a pool is active.
 * These two datums are stored in a kmalloced struct.
 * The 'raid_disks' here is twice the raid_disks in r1conf.
 * This allows space so that each 'real' device can have a replacement in the
 * second half of the array.
 */
struct pool_info {
	struct mddev	*mddev;
	int		raid_disks;
};
struct r1conf {
	struct mddev		*mddev;
	struct raid1_info	*mirrors;	/* twice 'raid_disks' to
						 * allow for replacements.
						 */
	int			raid_disks;

	spinlock_t		device_lock;

	/* list of 'struct r1bio' that need to be processed by raid1d,
	 * whether to retry a read, writeout a resync or recovery
	 * block, or anything else.
	 */
	struct list_head	retry_list;
	/* A separate list of r1bio which just need raid_end_bio_io called.
	 * This mustn't happen for writes which had any errors if the superblock
	 * needs to be written.
	 */
	struct list_head	bio_end_io_list;

	/* queue pending writes to be submitted on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	/* for use when syncing mirrors:
	 * We don't allow both normal IO and resync/recovery IO at
	 * the same time - resync/recovery can only happen when there
	 * is no other IO. So when either is active, the other has to wait.
	 * See a more detailed description in raid1.c near raise_barrier().
	 */
	wait_queue_head_t	wait_barrier;
	spinlock_t		resync_lock;
	atomic_t		*nr_pending;
	atomic_t		*nr_waiting;
	atomic_t		*nr_queued;
	atomic_t		*barrier;
	int			array_frozen;

	/* Set to 1 if a full sync is needed, (fresh device added).
	 * Cleared when a sync completes.
	 */
	int			fullsync;

	/* When the same as mddev->recovery_disabled we don't allow
	 * recovery to be attempted as we expect a read error.
	 */
	int			recovery_disabled;

	/* poolinfo contains information about the content of the
	 * mempools - it changes when the array grows or shrinks
	 */
	struct pool_info	*poolinfo;
	mempool_t		*r1bio_pool;
	mempool_t		*r1buf_pool;

	/* temporary buffer for synchronous IO when attempting to repair
	 * a read error.
	 */
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;

	/* Keep track of cluster resync window to send to other
	 * nodes.
	 */
	sector_t		cluster_sync_low;
	sector_t		cluster_sync_high;
};
/*
 * this is our 'private' RAID1 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID1 operation, and about their status:
 */

struct r1bio {
	atomic_t		remaining;	/* 'have we finished' count,
						 * used from IRQ handlers
						 */
	atomic_t		behind_remaining; /* number of write-behind ios remaining
						   * in this BehindIO request
						   */
	sector_t		sector;
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_disk;

	struct list_head	retry_list;

	/* Next two are only valid when R1BIO_BehindIO is set */
	struct bio_vec		*behind_bvecs;
	int			behind_page_count;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used.
	 * We choose the number when they are allocated.
	 */
	struct bio		*bios[0];
	/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
};
/* bits for r1bio.state */
enum r1bio_state {
	R1BIO_Uptodate,
	R1BIO_IsSync,
	R1BIO_Degraded,
	R1BIO_BehindIO,

	/* Set ReadError on bios that experience a read error so that
	 * raid1d knows what to do with them.
	 */
	R1BIO_ReadError,
	/* For write-behind requests, we call bi_end_io when
	 * the last non-write-behind device completes, providing
	 * any write was successful. Otherwise we call when
	 * any write-behind write succeeds, otherwise we call
	 * with failure when last write completes (and all failed).
	 * Record that bi_end_io was called with this flag...
	 */
	R1BIO_Returned,
	/* If a write for this request means we can clear some
	 * known-bad-block records, we set this flag
	 */
	R1BIO_MadeGood,
	R1BIO_WriteError,
	R1BIO_FailFast,
};
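/*
 * Map a sector to its I/O barrier bucket: reduce the sector to its 64MB
 * barrier unit number, then hash that into one of BARRIER_BUCKETS_NR
 * buckets.
 */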
static inline int sector_to_idx(sector_t sector)
{
	return hash_long(sector >> BARRIER_UNIT_SECTOR_BITS,
			 BARRIER_BUCKETS_NR_BITS);
}

#endif