bcache: bugfix - moving_gc now moves only correct buckets

Removed gc_move_threshold because picking buckets only by
threshold could lead to moving extra buckets (i.e. if there are
buckets at the threshold that aren't supposed to be moved
due to space considerations).

This is replaced by a GC_MOVE bit in the gc_mark bitmask.
Now only marked buckets get moved.

Signed-off-by: Nicholas Swenson <nks@daterainc.com>
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
commit 981aa8c091 (parent bee63f40cb)
Author:    Nicholas Swenson, 2013-11-07 17:53:19 -08:00
Committer: Kent Overstreet
3 files changed, 8 insertions(+), 8 deletions(-)
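
For readers unfamiliar with the mechanism: gc_mark packs several fields
into one 16-bit word, and the BITMASK() macro in bcache.h generates a
getter/setter pair for each field. Below is a minimal, self-contained
sketch of the layout after this patch; the macro body is a paraphrase
for illustration, not the tree's exact definition.

#include <stdint.h>
#include <stdio.h>

/* Paraphrase of bcache's BITMASK() helper: generates name() and
 * SET_name() accessors for `size` bits of `field` at bit `offset`. */
#define BITMASK(name, type, field, offset, size)			\
static inline uint64_t name(const type *k)				\
{ return (k->field >> offset) & ~(~0ULL << size); }			\
static inline void SET_##name(type *k, uint64_t v)			\
{									\
	k->field &= ~(~(~0ULL << size) << offset);			\
	k->field |= v << offset;					\
}

struct bucket { uint16_t gc_mark; };

/* Layout after this patch: 2 + 13 + 1 bits fill the word exactly. */
BITMASK(GC_MARK,         struct bucket, gc_mark, 0, 2);
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
BITMASK(GC_MOVE,         struct bucket, gc_mark, 15, 1);

int main(void)
{
	struct bucket b = { 0 };

	SET_GC_MARK(&b, 1);		/* GC_MARK_DIRTY */
	SET_GC_SECTORS_USED(&b, 1000);
	SET_GC_MOVE(&b, 1);

	/* The three fields coexist without clobbering each other. */
	printf("mark=%llu sectors=%llu move=%llu\n",
	       (unsigned long long)GC_MARK(&b),
	       (unsigned long long)GC_SECTORS_USED(&b),
	       (unsigned long long)GC_MOVE(&b));
	return 0;
}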

--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
 
 	if (watermark <= WATERMARK_METADATA) {
 		SET_GC_MARK(b, GC_MARK_METADATA);
+		SET_GC_MOVE(b, 0);
 		b->prio = BTREE_PRIO;
 	} else {
 		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MOVE(b, 0);
 		b->prio = INITIAL_PRIO;
 	}

--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
 	uint8_t		disk_gen;
 	uint8_t		last_gc; /* Most out of date gen in the btree */
 	uint8_t		gc_gen;
-	uint16_t	gc_mark;
+	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
 };
 
 /*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
 #define GC_MARK_RECLAIMABLE	0
 #define GC_MARK_DIRTY		1
 #define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
 #include "stats.h"
@@ -445,7 +446,6 @@ struct cache {
 	 * call prio_write() to keep gens from wrapping.
	 */
 	uint8_t		need_save_prio;
-	unsigned	gc_move_threshold;
 
 	/*
 	 * If nonzero, we know we aren't going to find any buckets to invalidate
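
Worth noting about the hunk above: GC_SECTORS_USED shrinks from 14 bits
to 13 to free the top bit for GC_MOVE, so the field widths still sum to
exactly the 16 bits of gc_mark. The kernel would typically guard an
invariant like this with BUILD_BUG_ON(); as a portable C11 sketch (not
part of the patch):

/* Field widths from the BITMASK() declarations above. */
#define GC_MARK_BITS		2
#define GC_SECTORS_USED_BITS	13
#define GC_MOVE_BITS		1

/* gc_mark is a uint16_t; the three fields must fill it exactly. */
_Static_assert(GC_MARK_BITS + GC_SECTORS_USED_BITS + GC_MOVE_BITS == 16,
	       "gc_mark bitfields must fill the 16-bit word");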

--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
 	unsigned i;
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
-		struct cache *ca = PTR_CACHE(c, k, i);
 		struct bucket *g = PTR_BUCKET(c, k, i);
 
-		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+		if (GC_MOVE(g))
 			return true;
 	}
 
@@ -227,9 +226,8 @@ void bch_moving_gc(struct cache_set *c)
 			sectors_to_move -= GC_SECTORS_USED(b);
 		}
 
-		ca->gc_move_threshold = bucket_heap_top(ca);
-
-		pr_debug("threshold %u", ca->gc_move_threshold);
+		while (heap_pop(&ca->heap, b, bucket_cmp))
+			SET_GC_MOVE(b, 1);
 	}
 
 	mutex_unlock(&c->bucket_lock);
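
Taken together, the movinggc.c hunks change candidate selection from
"any bucket whose sector count is under the saved threshold" to
"exactly the buckets still in the heap after the budget pass". Below is
a standalone sketch of that flow, with a sorted array standing in for
the kernel's heap_pop()/bucket_cmp heap; the numbers and field names
are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy bucket: only the fields the sketch needs. */
struct bucket {
	uint16_t sectors_used;	/* stands in for GC_SECTORS_USED(b) */
	int	 gc_move;	/* stands in for the GC_MOVE bit    */
};

static int cmp_sectors(const void *l, const void *r)
{
	const struct bucket *a = *(struct bucket * const *)l;
	const struct bucket *b = *(struct bucket * const *)r;

	return (int)b->sectors_used - (int)a->sectors_used; /* fullest first */
}

int main(void)
{
	struct bucket pool[6] = {
		{ 900, 0 }, { 120, 0 }, { 40, 0 },
		{ 700, 0 }, { 60, 0 }, { 300, 0 },
	};
	struct bucket *cand[6];
	unsigned n = 6, sectors_to_move = 0, reserve_sectors = 400, i;

	/* Candidate set, fullest buckets first: those are the worst
	 * candidates and get dropped first when over budget. */
	for (i = 0; i < n; i++) {
		cand[i] = &pool[i];
		sectors_to_move += pool[i].sectors_used;
	}
	qsort(cand, n, sizeof(cand[0]), cmp_sectors);

	/* Mirror of the first heap_pop() loop: shed the fullest buckets
	 * until the data left to copy fits the free-space reserve. */
	i = 0;
	while (sectors_to_move > reserve_sectors && i < n)
		sectors_to_move -= cand[i++]->sectors_used;

	/* Mirror of the new second loop: only survivors of the budget
	 * pass get GC_MOVE set, so moving_pred() moves exactly these. */
	for (; i < n; i++)
		cand[i]->gc_move = 1;

	for (i = 0; i < n; i++)
		printf("bucket %u: used=%u move=%d\n",
		       i, (unsigned)pool[i].sectors_used, pool[i].gc_move);
	return 0;
}

With these numbers only the three emptiest buckets (220 sectors total,
under the 400-sector reserve) end up with GC_MOVE set; under the old
scheme, any bucket below the saved threshold would have matched
moving_pred(), including ones already discarded for space.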