Merge branch 'fscache-fixes' into for-next

commit 8ea3a7c0df
Documentation/filesystems/caching/backend-api.txt
@@ -676,6 +676,29 @@ FS-Cache provides some utilities that a cache backend may make use of:
     as possible.
 
+ (*) Indicate that a stale object was found and discarded:
+
+	void fscache_object_retrying_stale(struct fscache_object *object);
+
+     This is called to indicate that the lookup procedure found an object in
+     the cache that the netfs decided was stale.  The object has been
+     discarded from the cache and the lookup will be performed again.
+
+ (*) Indicate that the caching backend killed an object:
+
+	void fscache_object_mark_killed(struct fscache_object *object,
+					enum fscache_why_object_killed why);
+
+     This is called to indicate that the cache backend preemptively killed an
+     object.  The why parameter should be set to indicate the reason:
+
+	FSCACHE_OBJECT_IS_STALE - the object was stale and needs discarding.
+	FSCACHE_OBJECT_NO_SPACE - there was insufficient cache space.
+	FSCACHE_OBJECT_WAS_RETIRED - the object was retired when relinquished.
+	FSCACHE_OBJECT_WAS_CULLED - the object was culled to make space.
+
 (*) Get and release references on a retrieval record:
 
	void fscache_get_retrieval(struct fscache_retrieval *op);
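For orientation, here is a minimal sketch of where a backend would place these
two calls during lookup. This sketch is not part of the patch: everything
prefixed mybackend_ is hypothetical, and only the fscache_* calls are the
interfaces documented above.

	extern bool mybackend_has_space(void);				/* hypothetical */
	extern void *mybackend_find_on_disk(struct fscache_object *);	/* hypothetical */
	extern bool mybackend_is_stale(struct fscache_object *, void *);/* hypothetical */
	extern void mybackend_delete(void *);				/* hypothetical */

	static int mybackend_lookup_object(struct fscache_object *object)
	{
		void *file;

	retry:
		if (!mybackend_has_space()) {
			/* rejected for lack of space: accounted as nsp= */
			fscache_object_mark_killed(object, FSCACHE_OBJECT_NO_SPACE);
			return -ENOBUFS;
		}

		file = mybackend_find_on_disk(object);
		if (file && mybackend_is_stale(object, file)) {
			/* stale on-disk copy: discard it, note the retry
			 * (accounted as stl=), then redo the lookup */
			mybackend_delete(file);
			fscache_object_retrying_stale(object);
			goto retry;
		}
		return file ? 0 : -ENODATA;
	}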
Documentation/filesystems/caching/fscache.txt
@@ -284,8 +284,9 @@ proc files.
 	enq=N	Number of times async ops queued for processing
 	can=N	Number of async ops cancelled
 	rej=N	Number of async ops rejected due to object lookup/create failure
+	ini=N	Number of async ops initialised
 	dfr=N	Number of async ops queued for deferred release
-	rel=N	Number of async ops released
+	rel=N	Number of async ops released (should equal ini=N when idle)
 	gc=N	Number of deferred-release async ops garbage collected
 CacheOp	alo=N	Number of in-progress alloc_object() cache ops
 	luo=N	Number of in-progress lookup_object() cache ops
@@ -303,6 +304,10 @@ proc files.
 	wrp=N	Number of in-progress write_page() cache ops
 	ucp=N	Number of in-progress uncache_page() cache ops
 	dsp=N	Number of in-progress dissociate_pages() cache ops
+CacheEv	nsp=N	Number of object lookups/creations rejected due to lack of space
+	stl=N	Number of stale objects deleted
+	rtr=N	Number of objects retired when relinquished
+	cul=N	Number of objects culled
 
 
 (*) /proc/fs/fscache/histogram
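For example, after one lookup has been rejected for lack of space and two
objects have been culled, the new CacheEv line in /proc/fs/fscache/stats
would read as follows (illustrative values only, not measured output):

	CacheEv: nsp=1 stl=0 rtr=0 cul=2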
fs/cachefiles/internal.h
@@ -43,7 +43,6 @@ struct cachefiles_object {
 	loff_t				i_size;		/* object size */
 	unsigned long			flags;
 #define CACHEFILES_OBJECT_ACTIVE	0		/* T if marked active */
-#define CACHEFILES_OBJECT_BURIED	1		/* T if preemptively buried */
 	atomic_t			usage;		/* object usage count */
 	uint8_t				type;		/* object type */
 	uint8_t				new;		/* T if object new */
fs/cachefiles/namei.c
@@ -97,7 +97,8 @@ static noinline void cachefiles_printk_object(struct cachefiles_object *object,
  * call vfs_unlink(), vfs_rmdir() or vfs_rename()
  */
 static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
-					  struct dentry *dentry)
+					  struct dentry *dentry,
+					  enum fscache_why_object_killed why)
 {
 	struct cachefiles_object *object;
 	struct rb_node *p;
@@ -132,8 +133,9 @@ found_dentry:
 		pr_err("\n");
 		pr_err("Error: Can't preemptively bury live object\n");
 		cachefiles_printk_object(object, NULL);
-	} else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
-		pr_err("Error: Object already preemptively buried\n");
+	} else {
+		if (why != FSCACHE_OBJECT_IS_STALE)
+			fscache_object_mark_killed(&object->fscache, why);
 	}
 
 	write_unlock(&cache->active_lock);
@@ -265,7 +267,8 @@ requeue:
 static int cachefiles_bury_object(struct cachefiles_cache *cache,
 				  struct dentry *dir,
 				  struct dentry *rep,
-				  bool preemptive)
+				  bool preemptive,
+				  enum fscache_why_object_killed why)
 {
 	struct dentry *grave, *trap;
 	struct path path, path_to_graveyard;
@@ -289,7 +292,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
 		ret = vfs_unlink(d_inode(dir), rep, NULL);
 
 		if (preemptive)
-			cachefiles_mark_object_buried(cache, rep);
+			cachefiles_mark_object_buried(cache, rep, why);
 	}
 
 	mutex_unlock(&d_inode(dir)->i_mutex);
@@ -394,7 +397,7 @@ try_again:
 				    "Rename failed with error %d", ret);
 
 		if (preemptive)
-			cachefiles_mark_object_buried(cache, rep);
+			cachefiles_mark_object_buried(cache, rep, why);
 	}
 
 	unlock_rename(cache->graveyard, dir);
@@ -422,7 +425,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
 
 	mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
 
-	if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
+	if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
 		/* object allocation for the same key preemptively deleted this
 		 * object's file so that it could create its own file */
 		_debug("object preemptively buried");
@@ -433,7 +436,8 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
 		 * may have been renamed */
 		if (dir == object->dentry->d_parent) {
 			ret = cachefiles_bury_object(cache, dir,
-						     object->dentry, false);
+						     object->dentry, false,
+						     FSCACHE_OBJECT_WAS_RETIRED);
 		} else {
 			/* it got moved, presumably by cachefilesd culling it,
 			 * so it's no longer in the key path and we can ignore
@@ -522,7 +526,7 @@ lookup_again:
 		if (d_is_negative(next)) {
 			ret = cachefiles_has_space(cache, 1, 0);
 			if (ret < 0)
-				goto create_error;
+				goto no_space_error;
 
 			path.dentry = dir;
 			ret = security_path_mkdir(&path, next, 0);
@@ -551,7 +555,7 @@ lookup_again:
 		if (d_is_negative(next)) {
 			ret = cachefiles_has_space(cache, 1, 0);
 			if (ret < 0)
-				goto create_error;
+				goto no_space_error;
 
 			path.dentry = dir;
 			ret = security_path_mknod(&path, next, S_IFREG, 0);
@@ -602,7 +606,8 @@ lookup_again:
 			 * mutex) */
 			object->dentry = NULL;
 
-			ret = cachefiles_bury_object(cache, dir, next, true);
+			ret = cachefiles_bury_object(cache, dir, next, true,
+						     FSCACHE_OBJECT_IS_STALE);
 			dput(next);
 			next = NULL;
 
@@ -610,6 +615,7 @@ lookup_again:
 				goto delete_error;
 
 			_debug("redo lookup");
+			fscache_object_retrying_stale(&object->fscache);
 			goto lookup_again;
 		}
 	}
@@ -662,6 +668,8 @@ lookup_again:
 	_leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
 	return 0;
 
+no_space_error:
+	fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
 create_error:
 	_debug("create error %d", ret);
 	if (ret == -EIO)
@@ -927,7 +935,8 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
 	/* actually remove the victim (drops the dir mutex) */
 	_debug("bury");
 
-	ret = cachefiles_bury_object(cache, dir, victim, false);
+	ret = cachefiles_bury_object(cache, dir, victim, false,
+				     FSCACHE_OBJECT_WAS_CULLED);
 	if (ret < 0)
 		goto error;
fs/fscache/cookie.c
@@ -327,7 +327,8 @@ static int fscache_alloc_object(struct fscache_cache *cache,
 
 object_already_extant:
 	ret = -ENOBUFS;
-	if (fscache_object_is_dead(object)) {
+	if (fscache_object_is_dying(object) ||
+	    fscache_cache_is_broken(object)) {
 		spin_unlock(&cookie->lock);
 		goto error;
 	}
@@ -671,7 +672,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
 	if (!op)
 		return -ENOMEM;
 
-	fscache_operation_init(op, NULL, NULL);
+	fscache_operation_init(op, NULL, NULL, NULL);
 	op->flags = FSCACHE_OP_MYTHREAD |
 		(1 << FSCACHE_OP_WAITING) |
 		(1 << FSCACHE_OP_UNUSE_COOKIE);
@@ -695,8 +696,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
 	/* the work queue now carries its own ref on the object */
 	spin_unlock(&cookie->lock);
 
-	ret = fscache_wait_for_operation_activation(object, op,
-						    NULL, NULL, NULL);
+	ret = fscache_wait_for_operation_activation(object, op, NULL, NULL);
 	if (ret == 0) {
 		/* ask the cache to honour the operation */
 		ret = object->cache->ops->check_consistency(op);
fs/fscache/internal.h
@@ -124,8 +124,7 @@ extern int fscache_submit_exclusive_op(struct fscache_object *,
 				       struct fscache_operation *);
 extern int fscache_submit_op(struct fscache_object *,
 			     struct fscache_operation *);
-extern int fscache_cancel_op(struct fscache_operation *,
-			     void (*)(struct fscache_operation *));
+extern int fscache_cancel_op(struct fscache_operation *, bool);
 extern void fscache_cancel_all_ops(struct fscache_object *);
 extern void fscache_abort_object(struct fscache_object *);
 extern void fscache_start_operations(struct fscache_object *);
@@ -138,8 +137,7 @@ extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
 extern int fscache_wait_for_operation_activation(struct fscache_object *,
 						 struct fscache_operation *,
 						 atomic_t *,
-						 atomic_t *,
-						 void (*)(struct fscache_operation *));
+						 atomic_t *);
 extern void fscache_invalidate_writes(struct fscache_cookie *);
 
 /*
@@ -164,6 +162,7 @@ extern atomic_t fscache_n_op_pend;
 extern atomic_t fscache_n_op_run;
 extern atomic_t fscache_n_op_enqueue;
 extern atomic_t fscache_n_op_deferred_release;
+extern atomic_t fscache_n_op_initialised;
 extern atomic_t fscache_n_op_release;
 extern atomic_t fscache_n_op_gc;
 extern atomic_t fscache_n_op_cancelled;
@@ -271,6 +270,11 @@ extern atomic_t fscache_n_cop_write_page;
 extern atomic_t fscache_n_cop_uncache_page;
 extern atomic_t fscache_n_cop_dissociate_pages;
 
+extern atomic_t fscache_n_cache_no_space_reject;
+extern atomic_t fscache_n_cache_stale_objects;
+extern atomic_t fscache_n_cache_retired_objects;
+extern atomic_t fscache_n_cache_culled_objects;
+
 static inline void fscache_stat(atomic_t *stat)
 {
 	atomic_inc(stat);
fs/fscache/object.c
@@ -327,6 +327,17 @@ void fscache_object_init(struct fscache_object *object,
 }
 EXPORT_SYMBOL(fscache_object_init);
 
+/*
+ * Mark the object as no longer being live, making sure that we synchronise
+ * against op submission.
+ */
+static inline void fscache_mark_object_dead(struct fscache_object *object)
+{
+	spin_lock(&object->lock);
+	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+	spin_unlock(&object->lock);
+}
+
 /*
  * Abort object initialisation before we start it.
  */
@@ -610,6 +621,8 @@ static const struct fscache_state *fscache_lookup_failure(struct fscache_object
 	object->cache->ops->lookup_complete(object);
 	fscache_stat_d(&fscache_n_cop_lookup_complete);
 
+	set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);
+
 	cookie = object->cookie;
 	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
@@ -629,7 +642,7 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
 	_enter("{OBJ%x,%d,%d},%d",
 	       object->debug_id, object->n_ops, object->n_children, event);
 
-	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+	fscache_mark_object_dead(object);
 	object->oob_event_mask = 0;
 
 	if (list_empty(&object->dependents) &&
@@ -948,7 +961,8 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
 	if (!op)
 		goto nomem;
 
-	fscache_operation_init(op, object->cache->ops->invalidate_object, NULL);
+	fscache_operation_init(op, object->cache->ops->invalidate_object,
+			       NULL, NULL);
 	op->flags = FSCACHE_OP_ASYNC |
 		(1 << FSCACHE_OP_EXCLUSIVE) |
 		(1 << FSCACHE_OP_UNUSE_COOKIE);
@@ -974,13 +988,13 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
 	return transit_to(UPDATE_OBJECT);
 
 nomem:
-	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+	fscache_mark_object_dead(object);
 	fscache_unuse_cookie(object);
 	_leave(" [ENOMEM]");
 	return transit_to(KILL_OBJECT);
 
 submit_op_failed:
-	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+	fscache_mark_object_dead(object);
 	spin_unlock(&cookie->lock);
 	fscache_unuse_cookie(object);
 	kfree(op);
@@ -1016,3 +1030,50 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
 	_leave("");
 	return transit_to(WAIT_FOR_CMD);
 }
+
+/**
+ * fscache_object_retrying_stale - Note retrying stale object
+ * @object: The object that will be retried
+ *
+ * Note that an object lookup found an on-disk object that was adjudged to be
+ * stale and has been deleted.  The lookup will be retried.
+ */
+void fscache_object_retrying_stale(struct fscache_object *object)
+{
+	fscache_stat(&fscache_n_cache_stale_objects);
+}
+EXPORT_SYMBOL(fscache_object_retrying_stale);
+
+/**
+ * fscache_object_mark_killed - Note that an object was killed
+ * @object: The object that was killed
+ * @why: The reason the object was killed.
+ *
+ * Note that an object was killed.  An error is logged if the object had
+ * already been marked as killed by the cache.
+ */
+void fscache_object_mark_killed(struct fscache_object *object,
+				enum fscache_why_object_killed why)
+{
+	if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
+		pr_err("Error: Object already killed by cache [%s]\n",
+		       object->cache->identifier);
+		return;
+	}
+
+	switch (why) {
+	case FSCACHE_OBJECT_NO_SPACE:
+		fscache_stat(&fscache_n_cache_no_space_reject);
+		break;
+	case FSCACHE_OBJECT_IS_STALE:
+		fscache_stat(&fscache_n_cache_stale_objects);
+		break;
+	case FSCACHE_OBJECT_WAS_RETIRED:
+		fscache_stat(&fscache_n_cache_retired_objects);
+		break;
+	case FSCACHE_OBJECT_WAS_CULLED:
+		fscache_stat(&fscache_n_cache_culled_objects);
+		break;
+	}
+}
+EXPORT_SYMBOL(fscache_object_mark_killed);
fs/fscache/operation.c
@@ -20,6 +20,37 @@
 atomic_t fscache_op_debug_id;
 EXPORT_SYMBOL(fscache_op_debug_id);
 
+static void fscache_operation_dummy_cancel(struct fscache_operation *op)
+{
+}
+
+/**
+ * fscache_operation_init - Do basic initialisation of an operation
+ * @op: The operation to initialise
+ * @processor: The function to run the operation (or NULL)
+ * @cancel: The function to call on cancellation (or NULL for a no-op)
+ * @release: The release function to assign
+ *
+ * Do basic initialisation of an operation.  The caller must still set flags
+ * and the object pointer if needed.
+ */
+void fscache_operation_init(struct fscache_operation *op,
+			    fscache_operation_processor_t processor,
+			    fscache_operation_cancel_t cancel,
+			    fscache_operation_release_t release)
+{
+	INIT_WORK(&op->work, fscache_op_work_func);
+	atomic_set(&op->usage, 1);
+	op->state = FSCACHE_OP_ST_INITIALISED;
+	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+	op->processor = processor;
+	op->cancel = cancel ?: fscache_operation_dummy_cancel;
+	op->release = release;
+	INIT_LIST_HEAD(&op->pend_link);
+	fscache_stat(&fscache_n_op_initialised);
+}
+EXPORT_SYMBOL(fscache_operation_init);
+
 /**
  * fscache_enqueue_operation - Enqueue an operation for processing
  * @op: The operation to enqueue
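Editor's note: every caller now passes a cancellation handler (or NULL for
the dummy above). A minimal sketch of a hypothetical internal caller follows;
my_cancel and my_alloc_op are invented for illustration and are not part of
the patch.

	/* my_cancel undoes whatever state the op pinned before it could run */
	static void my_cancel(struct fscache_operation *op)
	{
	}

	static struct fscache_operation *my_alloc_op(void)
	{
		struct fscache_operation *op;

		op = kzalloc(sizeof(*op), GFP_NOIO);
		if (!op)
			return NULL;
		fscache_operation_init(op, NULL, my_cancel, NULL);
		op->flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
		return op;
	}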
@@ -75,69 +104,6 @@ static void fscache_run_op(struct fscache_object *object,
 	fscache_stat(&fscache_n_op_run);
 }
 
-/*
- * submit an exclusive operation for an object
- * - other ops are excluded from running simultaneously with this one
- * - this gets any extra refs it needs on an op
- */
-int fscache_submit_exclusive_op(struct fscache_object *object,
-				struct fscache_operation *op)
-{
-	int ret;
-
-	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
-
-	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
-	ASSERTCMP(atomic_read(&op->usage), >, 0);
-
-	spin_lock(&object->lock);
-	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
-	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
-	ASSERT(list_empty(&op->pend_link));
-
-	op->state = FSCACHE_OP_ST_PENDING;
-	if (fscache_object_is_active(object)) {
-		op->object = object;
-		object->n_ops++;
-		object->n_exclusive++;	/* reads and writes must wait */
-
-		if (object->n_in_progress > 0) {
-			atomic_inc(&op->usage);
-			list_add_tail(&op->pend_link, &object->pending_ops);
-			fscache_stat(&fscache_n_op_pend);
-		} else if (!list_empty(&object->pending_ops)) {
-			atomic_inc(&op->usage);
-			list_add_tail(&op->pend_link, &object->pending_ops);
-			fscache_stat(&fscache_n_op_pend);
-			fscache_start_operations(object);
-		} else {
-			ASSERTCMP(object->n_in_progress, ==, 0);
-			fscache_run_op(object, op);
-		}
-
-		/* need to issue a new write op after this */
-		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
-		ret = 0;
-	} else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
-		op->object = object;
-		object->n_ops++;
-		object->n_exclusive++;	/* reads and writes must wait */
-		atomic_inc(&op->usage);
-		list_add_tail(&op->pend_link, &object->pending_ops);
-		fscache_stat(&fscache_n_op_pend);
-		ret = 0;
-	} else {
-		/* If we're in any other state, there must have been an I/O
-		 * error of some nature.
-		 */
-		ASSERT(test_bit(FSCACHE_IOERROR, &object->cache->flags));
-		ret = -EIO;
-	}
-
-	spin_unlock(&object->lock);
-	return ret;
-}
-
 /*
  * report an unexpected submission
  */
@@ -175,6 +141,87 @@ static void fscache_report_unexpected_submission(struct fscache_object *object,
 	dump_stack();
 }
 
+/*
+ * submit an exclusive operation for an object
+ * - other ops are excluded from running simultaneously with this one
+ * - this gets any extra refs it needs on an op
+ */
+int fscache_submit_exclusive_op(struct fscache_object *object,
+				struct fscache_operation *op)
+{
+	const struct fscache_state *ostate;
+	unsigned long flags;
+	int ret;
+
+	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
+
+	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
+	ASSERTCMP(atomic_read(&op->usage), >, 0);
+
+	spin_lock(&object->lock);
+	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
+	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+	ASSERT(list_empty(&op->pend_link));
+
+	ostate = object->state;
+	smp_rmb();
+
+	op->state = FSCACHE_OP_ST_PENDING;
+	flags = READ_ONCE(object->flags);
+	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
+		fscache_stat(&fscache_n_op_rejected);
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+		ret = -ENOBUFS;
+	} else if (unlikely(fscache_cache_is_broken(object))) {
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+		ret = -EIO;
+	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
+		op->object = object;
+		object->n_ops++;
+		object->n_exclusive++;	/* reads and writes must wait */
+
+		if (object->n_in_progress > 0) {
+			atomic_inc(&op->usage);
+			list_add_tail(&op->pend_link, &object->pending_ops);
+			fscache_stat(&fscache_n_op_pend);
+		} else if (!list_empty(&object->pending_ops)) {
+			atomic_inc(&op->usage);
+			list_add_tail(&op->pend_link, &object->pending_ops);
+			fscache_stat(&fscache_n_op_pend);
+			fscache_start_operations(object);
+		} else {
+			ASSERTCMP(object->n_in_progress, ==, 0);
+			fscache_run_op(object, op);
+		}
+
+		/* need to issue a new write op after this */
+		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+		ret = 0;
+	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
+		op->object = object;
+		object->n_ops++;
+		object->n_exclusive++;	/* reads and writes must wait */
+		atomic_inc(&op->usage);
+		list_add_tail(&op->pend_link, &object->pending_ops);
+		fscache_stat(&fscache_n_op_pend);
+		ret = 0;
+	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+		ret = -ENOBUFS;
+	} else {
+		fscache_report_unexpected_submission(object, op, ostate);
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+		ret = -ENOBUFS;
+	}
+
+	spin_unlock(&object->lock);
+	return ret;
+}
+
 /*
  * submit an operation for an object
  * - objects may be submitted only in the following states:
@@ -187,6 +234,7 @@ int fscache_submit_op(struct fscache_object *object,
 		      struct fscache_operation *op)
 {
 	const struct fscache_state *ostate;
+	unsigned long flags;
 	int ret;
 
 	_enter("{OBJ%x OP%x},{%u}",
@@ -204,7 +252,17 @@ int fscache_submit_op(struct fscache_object *object,
 	smp_rmb();
 
 	op->state = FSCACHE_OP_ST_PENDING;
-	if (fscache_object_is_active(object)) {
+	flags = READ_ONCE(object->flags);
+	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
+		fscache_stat(&fscache_n_op_rejected);
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+		ret = -ENOBUFS;
+	} else if (unlikely(fscache_cache_is_broken(object))) {
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+		ret = -EIO;
+	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
 		op->object = object;
 		object->n_ops++;
 
@@ -222,23 +280,21 @@ int fscache_submit_op(struct fscache_object *object,
 			fscache_run_op(object, op);
 		}
 		ret = 0;
-	} else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
+	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
 		op->object = object;
 		object->n_ops++;
 		atomic_inc(&op->usage);
 		list_add_tail(&op->pend_link, &object->pending_ops);
 		fscache_stat(&fscache_n_op_pend);
 		ret = 0;
-	} else if (fscache_object_is_dying(object)) {
-		fscache_stat(&fscache_n_op_rejected);
-		op->state = FSCACHE_OP_ST_CANCELLED;
-		ret = -ENOBUFS;
-	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
-		fscache_report_unexpected_submission(object, op, ostate);
-		ASSERT(!fscache_object_is_active(object));
+	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
+		op->cancel(op);
 		op->state = FSCACHE_OP_ST_CANCELLED;
 		ret = -ENOBUFS;
 	} else {
+		fscache_report_unexpected_submission(object, op, ostate);
+		ASSERT(!fscache_object_is_active(object));
+		op->cancel(op);
 		op->state = FSCACHE_OP_ST_CANCELLED;
 		ret = -ENOBUFS;
 	}
@@ -293,9 +349,10 @@ void fscache_start_operations(struct fscache_object *object)
  * cancel an operation that's pending on an object
  */
 int fscache_cancel_op(struct fscache_operation *op,
-		      void (*do_cancel)(struct fscache_operation *))
+		      bool cancel_in_progress_op)
 {
 	struct fscache_object *object = op->object;
+	bool put = false;
 	int ret;
 
 	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
@@ -309,19 +366,37 @@ int fscache_cancel_op(struct fscache_operation *op,
 	ret = -EBUSY;
 	if (op->state == FSCACHE_OP_ST_PENDING) {
 		ASSERT(!list_empty(&op->pend_link));
-		fscache_stat(&fscache_n_op_cancelled);
 		list_del_init(&op->pend_link);
-		if (do_cancel)
-			do_cancel(op);
+		put = true;
+
+		fscache_stat(&fscache_n_op_cancelled);
+		op->cancel(op);
 		op->state = FSCACHE_OP_ST_CANCELLED;
 		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
 			object->n_exclusive--;
 		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
 			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
-		fscache_put_operation(op);
 		ret = 0;
+	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
+		ASSERTCMP(object->n_in_progress, >, 0);
+		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+			object->n_exclusive--;
+		object->n_in_progress--;
+		if (object->n_in_progress == 0)
+			fscache_start_operations(object);
+
+		fscache_stat(&fscache_n_op_cancelled);
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+			object->n_exclusive--;
+		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+		ret = 0;
 	}
 
+	if (put)
+		fscache_put_operation(op);
 	spin_unlock(&object->lock);
 	_leave(" = %d", ret);
 	return ret;
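Editor's note: the bool replaces the old do_cancel callback. The per-op
->cancel handler installed at init time is now always run, and the flag only
selects whether an op that has already reached FSCACHE_OP_ST_IN_PROGRESS may
be torn down as well. The two call patterns used elsewhere in this patch:

	/* pending ops only, e.g. on a signal while waiting for activation */
	ret = fscache_cancel_op(op, false);

	/* also cancel an in-progress op, e.g. when the object is dying */
	ret = fscache_cancel_op(op, true);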
@@ -345,6 +420,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
 		list_del_init(&op->pend_link);
 
 		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
+		op->cancel(op);
 		op->state = FSCACHE_OP_ST_CANCELLED;
 
 		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
@@ -377,8 +453,12 @@ void fscache_op_complete(struct fscache_operation *op, bool cancelled)
 
 	spin_lock(&object->lock);
 
-	op->state = cancelled ?
-		FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE;
+	if (!cancelled) {
+		op->state = FSCACHE_OP_ST_COMPLETE;
+	} else {
+		op->cancel(op);
+		op->state = FSCACHE_OP_ST_CANCELLED;
+	}
 
 	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
 		object->n_exclusive--;
@@ -409,9 +489,9 @@ void fscache_put_operation(struct fscache_operation *op)
 		return;
 
 	_debug("PUT OP");
-	ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
+	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
+		    op->state != FSCACHE_OP_ST_COMPLETE,
 		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
-	op->state = FSCACHE_OP_ST_DEAD;
 
 	fscache_stat(&fscache_n_op_release);
 
@@ -419,37 +499,39 @@ void fscache_put_operation(struct fscache_operation *op)
 		op->release(op);
 		op->release = NULL;
 	}
+	op->state = FSCACHE_OP_ST_DEAD;
 
 	object = op->object;
+	if (likely(object)) {
+		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+			atomic_dec(&object->n_reads);
+		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
+			fscache_unuse_cookie(object);
 
-	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
-		atomic_dec(&object->n_reads);
-	if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
-		fscache_unuse_cookie(object);
+		/* now... we may get called with the object spinlock held, so we
+		 * complete the cleanup here only if we can immediately acquire the
+		 * lock, and defer it otherwise */
+		if (!spin_trylock(&object->lock)) {
+			_debug("defer put");
+			fscache_stat(&fscache_n_op_deferred_release);
 
-	/* now... we may get called with the object spinlock held, so we
-	 * complete the cleanup here only if we can immediately acquire the
-	 * lock, and defer it otherwise */
-	if (!spin_trylock(&object->lock)) {
-		_debug("defer put");
-		fscache_stat(&fscache_n_op_deferred_release);
+			cache = object->cache;
+			spin_lock(&cache->op_gc_list_lock);
+			list_add_tail(&op->pend_link, &cache->op_gc_list);
+			spin_unlock(&cache->op_gc_list_lock);
+			schedule_work(&cache->op_gc);
+			_leave(" [defer]");
+			return;
+		}
 
-		cache = object->cache;
-		spin_lock(&cache->op_gc_list_lock);
-		list_add_tail(&op->pend_link, &cache->op_gc_list);
-		spin_unlock(&cache->op_gc_list_lock);
-		schedule_work(&cache->op_gc);
-		_leave(" [defer]");
-		return;
+		ASSERTCMP(object->n_ops, >, 0);
+		object->n_ops--;
+		if (object->n_ops == 0)
+			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
+
+		spin_unlock(&object->lock);
 	}
 
-	ASSERTCMP(object->n_ops, >, 0);
-	object->n_ops--;
-	if (object->n_ops == 0)
-		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
-
-	spin_unlock(&object->lock);
-
 	kfree(op);
 	_leave(" [done]");
 }
fs/fscache/page.c
@@ -213,7 +213,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 		return -ENOMEM;
 	}
 
-	fscache_operation_init(op, fscache_attr_changed_op, NULL);
+	fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL);
 	op->flags = FSCACHE_OP_ASYNC |
 		(1 << FSCACHE_OP_EXCLUSIVE) |
 		(1 << FSCACHE_OP_UNUSE_COOKIE);
@@ -239,7 +239,7 @@ nobufs_dec:
 	wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs:
 	spin_unlock(&cookie->lock);
-	kfree(op);
+	fscache_put_operation(op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
 	fscache_stat(&fscache_n_attr_changed_nobufs);
@@ -248,6 +248,17 @@ nobufs:
 }
 EXPORT_SYMBOL(__fscache_attr_changed);
 
+/*
+ * Handle cancellation of a pending retrieval op
+ */
+static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
+{
+	struct fscache_retrieval *op =
+		container_of(_op, struct fscache_retrieval, op);
+
+	atomic_set(&op->n_pages, 0);
+}
+
 /*
  * release a retrieval op reference
  */
@@ -258,11 +269,12 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
 
 	_enter("{OP%x}", op->op.debug_id);
 
-	ASSERTCMP(atomic_read(&op->n_pages), ==, 0);
+	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
+		    atomic_read(&op->n_pages), ==, 0);
 
 	fscache_hist(fscache_retrieval_histogram, op->start_time);
 	if (op->context)
-		fscache_put_context(op->op.object->cookie, op->context);
+		fscache_put_context(op->cookie, op->context);
 
 	_leave("");
 }
@@ -285,15 +297,24 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
 		return NULL;
 	}
 
-	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
+	fscache_operation_init(&op->op, NULL,
+			       fscache_do_cancel_retrieval,
+			       fscache_release_retrieval_op);
 	op->op.flags	= FSCACHE_OP_MYTHREAD |
 			  (1UL << FSCACHE_OP_WAITING) |
 			  (1UL << FSCACHE_OP_UNUSE_COOKIE);
+	op->cookie	= cookie;
 	op->mapping	= mapping;
 	op->end_io_func	= end_io_func;
 	op->context	= context;
 	op->start_time	= jiffies;
 	INIT_LIST_HEAD(&op->to_do);
+
+	/* Pin the netfs read context in case we need to do the actual netfs
+	 * read because we've encountered a cache read failure.
+	 */
+	if (context)
+		fscache_get_context(op->cookie, context);
 	return op;
 }
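Editor's note on why the context pinning moves into fscache_alloc_retrieval():
if a retrieval is cancelled before it is ever submitted, op->op.object was
never set, so the release function could not previously reach the cookie in
order to drop the context. Storing op->cookie at allocation time gives
fscache_release_retrieval_op a path that works whether or not the op ran,
pairing with the release-side change in the hunk above:

	if (op->context)
		fscache_put_context(op->cookie, op->context);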
@@ -329,25 +350,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
 	return 0;
 }
 
-/*
- * Handle cancellation of a pending retrieval op
- */
-static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
-{
-	struct fscache_retrieval *op =
-		container_of(_op, struct fscache_retrieval, op);
-
-	atomic_set(&op->n_pages, 0);
-}
-
 /*
  * wait for an object to become active (or dead)
  */
 int fscache_wait_for_operation_activation(struct fscache_object *object,
 					  struct fscache_operation *op,
 					  atomic_t *stat_op_waits,
-					  atomic_t *stat_object_dead,
-					  void (*do_cancel)(struct fscache_operation *))
+					  atomic_t *stat_object_dead)
 {
 	int ret;
 
@@ -359,7 +368,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
 		fscache_stat(stat_op_waits);
 		if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 				TASK_INTERRUPTIBLE) != 0) {
-			ret = fscache_cancel_op(op, do_cancel);
+			ret = fscache_cancel_op(op, false);
 			if (ret == 0)
 				return -ERESTARTSYS;
 
@@ -377,11 +386,13 @@ check_if_dead:
 		_leave(" = -ENOBUFS [cancelled]");
 		return -ENOBUFS;
 	}
-	if (unlikely(fscache_object_is_dead(object))) {
-		pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
-		fscache_cancel_op(op, do_cancel);
+	if (unlikely(fscache_object_is_dying(object) ||
+		     fscache_cache_is_broken(object))) {
+		enum fscache_operation_state state = op->state;
+		fscache_cancel_op(op, true);
 		if (stat_object_dead)
 			fscache_stat(stat_object_dead);
+		_leave(" = -ENOBUFS [obj dead %d]", state);
 		return -ENOBUFS;
 	}
 	return 0;
@@ -453,17 +464,12 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
 	fscache_stat(&fscache_n_retrieval_ops);
 
-	/* pin the netfs read context in case we need to do the actual netfs
-	 * read because we've encountered a cache read failure */
-	fscache_get_context(object->cookie, op->context);
-
 	/* we wait for the operation to become active, and then process it
 	 * *here*, in this thread, and not in the thread pool */
 	ret = fscache_wait_for_operation_activation(
 		object, &op->op,
 		__fscache_stat(&fscache_n_retrieval_op_waits),
-		__fscache_stat(&fscache_n_retrievals_object_dead),
-		fscache_do_cancel_retrieval);
+		__fscache_stat(&fscache_n_retrievals_object_dead));
 	if (ret < 0)
 		goto error;
 
@@ -503,7 +509,7 @@ nobufs_unlock:
 	spin_unlock(&cookie->lock);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
-	kfree(op);
+	fscache_put_retrieval(op);
 nobufs:
 	fscache_stat(&fscache_n_retrievals_nobufs);
 	_leave(" = -ENOBUFS");
@@ -584,17 +590,12 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
 	fscache_stat(&fscache_n_retrieval_ops);
 
-	/* pin the netfs read context in case we need to do the actual netfs
-	 * read because we've encountered a cache read failure */
-	fscache_get_context(object->cookie, op->context);
-
 	/* we wait for the operation to become active, and then process it
 	 * *here*, in this thread, and not in the thread pool */
 	ret = fscache_wait_for_operation_activation(
 		object, &op->op,
 		__fscache_stat(&fscache_n_retrieval_op_waits),
-		__fscache_stat(&fscache_n_retrievals_object_dead),
-		fscache_do_cancel_retrieval);
+		__fscache_stat(&fscache_n_retrievals_object_dead));
 	if (ret < 0)
 		goto error;
 
@@ -632,7 +633,7 @@ nobufs_unlock_dec:
 	wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
 	spin_unlock(&cookie->lock);
-	kfree(op);
+	fscache_put_retrieval(op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
 nobufs:
@@ -700,8 +701,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 	ret = fscache_wait_for_operation_activation(
 		object, &op->op,
 		__fscache_stat(&fscache_n_alloc_op_waits),
-		__fscache_stat(&fscache_n_allocs_object_dead),
-		fscache_do_cancel_retrieval);
+		__fscache_stat(&fscache_n_allocs_object_dead));
 	if (ret < 0)
 		goto error;
 
@@ -726,7 +726,7 @@ nobufs_unlock_dec:
 	wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
 	spin_unlock(&cookie->lock);
-	kfree(op);
+	fscache_put_retrieval(op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
 nobufs:
@@ -944,7 +944,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (!op)
 		goto nomem;
 
-	fscache_operation_init(&op->op, fscache_write_op,
+	fscache_operation_init(&op->op, fscache_write_op, NULL,
 			       fscache_release_write_op);
 	op->op.flags = FSCACHE_OP_ASYNC |
 		    (1 << FSCACHE_OP_WAITING) |
@@ -1016,7 +1016,7 @@ already_pending:
 	spin_unlock(&object->lock);
 	spin_unlock(&cookie->lock);
 	radix_tree_preload_end();
-	kfree(op);
+	fscache_put_operation(&op->op);
 	fscache_stat(&fscache_n_stores_ok);
 	_leave(" = 0");
 	return 0;
@@ -1036,7 +1036,7 @@ nobufs_unlock_obj:
 nobufs:
 	spin_unlock(&cookie->lock);
 	radix_tree_preload_end();
-	kfree(op);
+	fscache_put_operation(&op->op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
 	fscache_stat(&fscache_n_stores_nobufs);
@@ -1044,7 +1044,7 @@ nobufs:
 	return -ENOBUFS;
 
 nomem_free:
-	kfree(op);
+	fscache_put_operation(&op->op);
 nomem:
 	fscache_stat(&fscache_n_stores_oom);
 	_leave(" = -ENOMEM");
fs/fscache/stats.c
@@ -23,6 +23,7 @@ atomic_t fscache_n_op_run;
 atomic_t fscache_n_op_enqueue;
 atomic_t fscache_n_op_requeue;
 atomic_t fscache_n_op_deferred_release;
+atomic_t fscache_n_op_initialised;
 atomic_t fscache_n_op_release;
 atomic_t fscache_n_op_gc;
 atomic_t fscache_n_op_cancelled;
@@ -130,6 +131,11 @@ atomic_t fscache_n_cop_write_page;
 atomic_t fscache_n_cop_uncache_page;
 atomic_t fscache_n_cop_dissociate_pages;
 
+atomic_t fscache_n_cache_no_space_reject;
+atomic_t fscache_n_cache_stale_objects;
+atomic_t fscache_n_cache_retired_objects;
+atomic_t fscache_n_cache_culled_objects;
+
 /*
  * display the general statistics
  */
@@ -246,7 +252,8 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_op_enqueue),
 		   atomic_read(&fscache_n_op_cancelled),
 		   atomic_read(&fscache_n_op_rejected));
-	seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
+	seq_printf(m, "Ops    : ini=%u dfr=%u rel=%u gc=%u\n",
+		   atomic_read(&fscache_n_op_initialised),
		   atomic_read(&fscache_n_op_deferred_release),
 		   atomic_read(&fscache_n_op_release),
 		   atomic_read(&fscache_n_op_gc));
@@ -271,6 +278,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_cop_write_page),
 		   atomic_read(&fscache_n_cop_uncache_page),
 		   atomic_read(&fscache_n_cop_dissociate_pages));
+	seq_printf(m, "CacheEv: nsp=%d stl=%d rtr=%d cul=%d\n",
+		   atomic_read(&fscache_n_cache_no_space_reject),
+		   atomic_read(&fscache_n_cache_stale_objects),
+		   atomic_read(&fscache_n_cache_retired_objects),
+		   atomic_read(&fscache_n_cache_culled_objects));
 	return 0;
 }
include/linux/fscache-cache.h
@@ -74,6 +74,7 @@ extern wait_queue_head_t fscache_cache_cleared_wq;
  */
 typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
 typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
 
 enum fscache_operation_state {
 	FSCACHE_OP_ST_BLANK,		/* Op is not yet submitted */
@@ -109,6 +110,9 @@ struct fscache_operation {
 					 * the op in a non-pool thread */
 	fscache_operation_processor_t processor;
 
+	/* Operation cancellation cleanup (optional) */
+	fscache_operation_cancel_t cancel;
+
 	/* operation releaser */
 	fscache_operation_release_t release;
 };
@@ -119,33 +123,17 @@ extern void fscache_op_work_func(struct work_struct *work);
 extern void fscache_enqueue_operation(struct fscache_operation *);
 extern void fscache_op_complete(struct fscache_operation *, bool);
 extern void fscache_put_operation(struct fscache_operation *);
-
-/**
- * fscache_operation_init - Do basic initialisation of an operation
- * @op: The operation to initialise
- * @release: The release function to assign
- *
- * Do basic initialisation of an operation.  The caller must still set flags,
- * object and processor if needed.
- */
-static inline void fscache_operation_init(struct fscache_operation *op,
-					fscache_operation_processor_t processor,
-					fscache_operation_release_t release)
-{
-	INIT_WORK(&op->work, fscache_op_work_func);
-	atomic_set(&op->usage, 1);
-	op->state = FSCACHE_OP_ST_INITIALISED;
-	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
-	op->processor = processor;
-	op->release = release;
-	INIT_LIST_HEAD(&op->pend_link);
-}
+extern void fscache_operation_init(struct fscache_operation *,
+				   fscache_operation_processor_t,
+				   fscache_operation_cancel_t,
+				   fscache_operation_release_t);
 
 /*
  * data read operation
  */
 struct fscache_retrieval {
 	struct fscache_operation op;
+	struct fscache_cookie	*cookie;	/* The netfs cookie */
 	struct address_space	*mapping;	/* netfs pages */
 	fscache_rw_complete_t	end_io_func;	/* function to call on I/O completion */
 	void			*context;	/* netfs read context (pinned) */
@@ -371,6 +359,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_LOOKED_UP	4	/* T if object has been looked up */
 #define FSCACHE_OBJECT_IS_AVAILABLE	5	/* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED		6	/* T if object was retired on relinquishment */
+#define FSCACHE_OBJECT_KILLED_BY_CACHE	7	/* T if object was killed by the cache */
 
 	struct list_head	cache_link;	/* link in cache->object_list */
 	struct hlist_node	cookie_link;	/* link in cookie->backing_objects */
@@ -410,17 +399,16 @@ static inline bool fscache_object_is_available(struct fscache_object *object)
 	return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
 }
 
+static inline bool fscache_cache_is_broken(struct fscache_object *object)
+{
+	return test_bit(FSCACHE_IOERROR, &object->cache->flags);
+}
+
 static inline bool fscache_object_is_active(struct fscache_object *object)
 {
 	return fscache_object_is_available(object) &&
 	       fscache_object_is_live(object) &&
-	       !test_bit(FSCACHE_IOERROR, &object->cache->flags);
-}
-
-static inline bool fscache_object_is_dead(struct fscache_object *object)
-{
-	return fscache_object_is_dying(object) &&
-	       test_bit(FSCACHE_IOERROR, &object->cache->flags);
+	       !fscache_cache_is_broken(object);
 }
 
/**
@@ -551,4 +539,15 @@ extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
 					       const void *data,
 					       uint16_t datalen);
 
+extern void fscache_object_retrying_stale(struct fscache_object *object);
+
+enum fscache_why_object_killed {
+	FSCACHE_OBJECT_IS_STALE,
+	FSCACHE_OBJECT_NO_SPACE,
+	FSCACHE_OBJECT_WAS_RETIRED,
+	FSCACHE_OBJECT_WAS_CULLED,
+};
+extern void fscache_object_mark_killed(struct fscache_object *object,
+				       enum fscache_why_object_killed why);
+
 #endif /* _LINUX_FSCACHE_CACHE_H */