mirror of https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-28 05:24:47 +08:00
c37511b863
The tracepoints were reworked to be more sensible, and a null pointer deref in one of the tracepoints was fixed.

Converted some of the pr_debug()s to tracepoints - this is partly a performance optimization; it used to be that without DEBUG or CONFIG_DYNAMIC_DEBUG, pr_debug() was an empty macro, but at some point it was changed to an empty inline function.

Some of the pr_debug() statements had rather expensive function calls as part of the arguments, so this code was getting run unnecessarily even on non-debug kernels - in some fast paths, too.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
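To make the performance point concrete, here is a small userspace sketch (illustrative only, not kernel code; the names pr_debug_as_macro, pr_debug_as_inline, and expensive_summary are invented for this example). An empty macro discards its arguments during preprocessing, so they are never evaluated; an empty inline function takes its arguments as ordinary C expressions, so calls among them still execute unless the compiler can prove them free of side effects:

/* Stand-in for the kind of costly call that appeared in pr_debug() arguments. */
static int expensive_summary(void)
{
	int i, sum = 0;

	for (i = 0; i < 1000000; i++)
		sum += i;
	return sum;
}

/* Old non-debug behaviour: an empty macro; the arguments vanish at
 * preprocessing time and are never evaluated. */
#define pr_debug_as_macro(fmt, ...)	do { } while (0)

/* Later behaviour: an empty inline function; the arguments must be
 * evaluated before the (no-op) call, unless the optimizer proves them pure. */
static inline void pr_debug_as_inline(const char *fmt, ...)
{
	(void)fmt;
}

int main(void)
{
	pr_debug_as_macro("sum: %d\n", expensive_summary());	/* loop never runs */
	pr_debug_as_inline("sum: %d\n", expensive_summary());	/* loop runs at -O0 */
	return 0;
}

This is the cost the conversion to tracepoints avoids: a disabled tracepoint is skipped behind a static branch, so argument expressions in fast paths are not computed.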
63 lines
1.5 KiB
C
#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_

#include <linux/cgroup.h>

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bcache_device	*d;
	struct task_struct	*task;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	unsigned		cache_bio_sectors;

	unsigned		recoverable:1;
	unsigned		unaligned_bvec:1;

	unsigned		write:1;
	unsigned		writeback:1;

	/* IO error returned to s->bio */
	short			error;
	unsigned long		start_time;

	/* Anything past op->keys won't get zeroed in do_bio_hook */
	struct btree_op		op;
};

void bch_cache_read_endio(struct bio *, int);
unsigned bch_get_congested(struct cache_set *);
void bch_insert_data(struct closure *cl);
void bch_btree_insert_async(struct closure *);

void bch_open_buckets_free(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);

void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);

extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;

struct bch_cgroup {
#ifdef CONFIG_CGROUP_BCACHE
	struct cgroup_subsys_state	css;
#endif
	/*
	 * We subtract one from the index into bch_cache_modes[], so that
	 * default == -1; this makes it so the rest match up with d->cache_mode,
	 * and we use d->cache_mode if cgrp->cache_mode < 0
	 */
	short				cache_mode;
	bool				verify;
	struct cache_stat_collector	stats;
};

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);

#endif /* _BCACHE_REQUEST_H_ */
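The comment block in struct bch_cgroup is worth unpacking: per-cgroup cache_mode values are stored offset by one so that "default" lands on -1, lining up with d->cache_mode, and any negative value defers to the device. A minimal sketch of that resolution rule, with a hypothetical helper name (the real decision is made in the request-handling code):

/* Hypothetical helper illustrating the convention described above:
 * a negative per-cgroup cache_mode means "no override", so the
 * backing device's own cache_mode is used instead. */
static short effective_cache_mode(short cgroup_mode, short device_mode)
{
	return cgroup_mode < 0 ? device_mode : cgroup_mode;
}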