/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */
|
|
|
|
|
|
|
|
/*
 * In-kernel waitqueue operations.
 */
|
|
|
|
|
|
|
|
#include "protocol.h"
|
2015-12-05 01:56:14 +08:00
|
|
|
#include "orangefs-kernel.h"
|
|
|
|
#include "orangefs-bufmap.h"
|
2015-07-17 22:38:15 +08:00
|
|
|
|
2016-01-22 11:58:58 +08:00
|
|
|
static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *);
|
|
|
|
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *);
|
|
|
|
|
2015-07-17 22:38:15 +08:00
|
|
|
/*
|
|
|
|
* What we do in this function is to walk the list of operations that are
|
|
|
|
* present in the request queue and mark them as purged.
|
|
|
|
* NOTE: This is called from the device close after client-core has
|
|
|
|
* guaranteed that no new operations could appear on the list since the
|
|
|
|
* client-core is anyway going to exit.
|
|
|
|
*/
|
|
|
|
void purge_waiting_ops(void)
|
|
|
|
{
|
2015-11-25 04:12:14 +08:00
|
|
|
struct orangefs_kernel_op_s *op;
|
2015-07-17 22:38:15 +08:00
|
|
|
|
2015-11-25 04:12:14 +08:00
|
|
|
spin_lock(&orangefs_request_list_lock);
|
|
|
|
list_for_each_entry(op, &orangefs_request_list, list) {
|
2015-07-17 22:38:15 +08:00
|
|
|
gossip_debug(GOSSIP_WAIT_DEBUG,
|
|
|
|
"pvfs2-client-core: purging op tag %llu %s\n",
|
|
|
|
llu(op->tag),
|
|
|
|
get_opname_string(op));
|
|
|
|
spin_lock(&op->lock);
|
|
|
|
set_op_state_purged(op);
|
|
|
|
spin_unlock(&op->lock);
|
|
|
|
}
|
2015-11-25 04:12:14 +08:00
|
|
|
spin_unlock(&orangefs_request_list_lock);
|
2015-07-17 22:38:15 +08:00
|
|
|
}
|
|
|
|
|
2016-01-20 01:26:13 +08:00
|
|
|
/*
 * Queue @op at the tail of the global upcall request list, mark it as
 * waiting, and wake any reader blocked on the device waitqueue.
 *
 * Lock order: orangefs_request_list_lock is taken before op->lock; both
 * are held across the state change and insertion so a reader never sees
 * a queued op that is not yet in the waiting state.
 */
static inline void
add_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	/* regular (non-priority) ops go to the back of the queue */
	list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	/* let the client-core reader know there is work to pick up */
	wake_up_interruptible(&orangefs_request_list_waitq);
}
|
|
|
|
|
|
|
|
/*
 * Like add_op_to_request_list(), but queue @op at the HEAD of the global
 * request list so the client-core services it ahead of regular ops.
 *
 * Same lock order as add_op_to_request_list():
 * orangefs_request_list_lock before op->lock.
 */
static inline
void add_priority_op_to_request_list(struct orangefs_kernel_op_s *op)
{
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);

	/* priority ops jump the queue */
	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&orangefs_request_list_lock);
	spin_unlock(&op->lock);
	/* let the client-core reader know there is work to pick up */
	wake_up_interruptible(&orangefs_request_list_waitq);
}
|
|
|
|
|
/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or a result of failure to
 * service the operation. If the caller wishes to distinguish, then
 * op->state can be checked to see if it was serviced or not.
 *
 * Returns contents of op->downcall.status for convenience.
 */
|
2015-11-25 04:12:14 +08:00
|
|
|
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	/* flags to modify behavior */
	sigset_t orig_sigset;
	int ret = 0;

	/* irqflags and wait_entry are only used IF the client-core aborts */
	unsigned long irqflags;

	DEFINE_WAIT(wait_entry);

	/* record the submitter so client-core can attribute the upcall */
	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation: %s %p\n",
		     op_name,
		     op);
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: operation posted by process: %s, pid: %i\n",
		     current->comm,
		     current->pid);

	/* mask out signals if this operation is not to be interrupted */
	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_block_signals(&orig_sigset);

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) {
		ret = mutex_lock_interruptible(&request_mutex);
		/*
		 * check to see if we were interrupted while waiting for
		 * semaphore
		 */
		if (ret < 0) {
			/* restore the signal mask we blocked above */
			if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
				orangefs_set_signals(&orig_sigset);
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "orangefs: service_operation interrupted.\n");
			return ret;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:About to call is_daemon_in_service().\n",
		     __func__);

	if (is_daemon_in_service() < 0) {
		/*
		 * By incrementing the per-operation attempt counter, we
		 * directly go into the timeout logic while waiting for
		 * the matching downcall to be read
		 */
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:client core is NOT in service(%d).\n",
			     __func__,
			     is_daemon_in_service());
		op->attempts++;
	}

	/* queue up the operation */
	if (flags & ORANGEFS_OP_PRIORITY) {
		add_priority_op_to_request_list(op);
	} else {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call add_op_to_request_list().\n",
			     __func__);
		add_op_to_request_list(op);
	}

	if (!(flags & ORANGEFS_OP_NO_SEMAPHORE))
		mutex_unlock(&request_mutex);

	/*
	 * If we are asked to service an asynchronous operation from
	 * VFS perspective, we are done.
	 */
	if (flags & ORANGEFS_OP_ASYNC)
		return 0;

	if (flags & ORANGEFS_OP_CANCELLATION) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:"
			     "About to call wait_for_cancellation_downcall.\n",
			     __func__);
		ret = wait_for_cancellation_downcall(op);
	} else {
		ret = wait_for_matching_downcall(op);
	}

	if (ret < 0) {
		/* failed to get matching downcall */
		if (ret == -ETIMEDOUT) {
			gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n",
				   op_name);
		}
		op->downcall.status = ret;
	} else {
		/* got matching downcall; make sure status is in errno format */
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
	}

	/* restore the caller's signal mask before we possibly retry/return */
	if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
		orangefs_set_signals(&orig_sigset);

	BUG_ON(ret != op->downcall.status);
	/* retry if operation has not been serviced and if requested */
	if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s)"
			     " -- operation to be retried (%d attempt)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts + 1);

		if (!op->uses_shared_memory)
			/*
			 * this operation doesn't use the shared memory
			 * system
			 */
			goto retry_servicing;

		/* op uses shared memory */
		if (orangefs_get_bufmap_init() == 0) {
			/*
			 * This operation uses the shared memory system AND
			 * the system is not yet ready. This situation occurs
			 * when the client-core is restarted AND there were
			 * operations waiting to be processed or were already
			 * in process.
			 */
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "uses_shared_memory is true.\n");
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Client core in-service status(%d).\n",
				     is_daemon_in_service());
			gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n",
				     orangefs_get_bufmap_init());
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "operation's status is 0x%0x.\n",
				     op->op_state);

			/*
			 * let process sleep for a few seconds so shared
			 * memory system can be initialized.
			 */
			spin_lock_irqsave(&op->lock, irqflags);
			prepare_to_wait(&orangefs_bufmap_init_waitq,
					&wait_entry,
					TASK_INTERRUPTIBLE);
			spin_unlock_irqrestore(&op->lock, irqflags);

			/*
			 * Wait for orangefs_bufmap_initialize() to wake me up
			 * within the allotted time.
			 */
			ret = schedule_timeout(MSECS_TO_JIFFIES
				(1000 * ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS));

			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Value returned from schedule_timeout:"
				     "%d.\n",
				     ret);
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "Is shared memory available? (%d).\n",
				     orangefs_get_bufmap_init());

			spin_lock_irqsave(&op->lock, irqflags);
			finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);
			spin_unlock_irqrestore(&op->lock, irqflags);

			/* still not initialized: give up on this request */
			if (orangefs_get_bufmap_init() == 0) {
				gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted. Aborting user's request(%s).\n",
					   __func__,
					   ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS,
					   get_opname_string(op));
				return -EIO;
			}

			/*
			 * Return to the calling function and re-populate a
			 * shared memory buffer.
			 */
			return -EAGAIN;
		}
	}

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation %s returning: %d for %p.\n",
		     op_name,
		     ret,
		     op);
	return ret;
}
|
|
|
|
|
2016-01-20 01:26:13 +08:00
|
|
|
static inline void remove_op_from_request_list(struct orangefs_kernel_op_s *op)
|
|
|
|
{
|
|
|
|
struct list_head *tmp = NULL;
|
|
|
|
struct list_head *tmp_safe = NULL;
|
|
|
|
struct orangefs_kernel_op_s *tmp_op = NULL;
|
|
|
|
|
|
|
|
spin_lock(&orangefs_request_list_lock);
|
|
|
|
list_for_each_safe(tmp, tmp_safe, &orangefs_request_list) {
|
|
|
|
tmp_op = list_entry(tmp,
|
|
|
|
struct orangefs_kernel_op_s,
|
|
|
|
list);
|
|
|
|
if (tmp_op && (tmp_op == op)) {
|
|
|
|
list_del(&tmp_op->list);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
spin_unlock(&orangefs_request_list_lock);
|
|
|
|
}
|
|
|
|
|
2016-01-22 11:21:41 +08:00
|
|
|
/*
 * Detach @op from whichever queue or table it currently occupies after
 * the submitting process was interrupted (signal or timeout).
 */
static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * handle interrupted cases depending on what state we were in when
	 * the interruption is detected. there is a coarse grained lock
	 * across the operation.
	 *
	 * NOTE: be sure not to reverse lock ordering by locking an op lock
	 * while holding the request_list lock. Here, we first lock the op
	 * and then lock the appropriate list.
	 */
	if (!op) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: op is null, ignoring\n",
			     __func__);
		return;
	}

	/*
	 * one more sanity check, make sure it's in one of the possible states
	 * or don't try to cancel it
	 */
	if (!(op_state_waiting(op) ||
	      op_state_in_progress(op) ||
	      op_state_serviced(op) ||
	      op_state_purged(op))) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: op %p not in a valid state (%0x), "
			     "ignoring\n",
			     __func__,
			     op,
			     op->op_state);
		return;
	}

	spin_lock(&op->lock);

	if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		remove_op_from_request_list(op);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else if (!op_state_serviced(op)) {
		/* purged (or unexpected) state: nothing to unlink, just log */
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	} else {
		/*
		 * It is not intended for execution to flow here,
		 * but having this unlock here makes sparse happy.
		 */
		gossip_err("%s: can't get here.\n", __func__);
		spin_unlock(&op->lock);
	}
}
|
|
|
|
|
|
|
|
/*
 * sleeps on waitqueue waiting for matching downcall.
 * if client-core finishes servicing, then we are good to go.
 * else if client-core exits, we get woken up here, and retry with a timeout
 *
 * After this call returns to the caller, the specified op will no
 * longer be on any list or htable.
 *
 * Returns 0 on success and -errno on failure
 * Errors are:
 * EAGAIN in case we want the caller to requeue and try again..
 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 * operation since client-core seems to be exiting too often
 * or if we were interrupted.
 */
|
2016-01-22 11:58:58 +08:00
|
|
|
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	while (1) {
		/* op->lock guards both the state word and the waitq race */
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}
		spin_unlock(&op->lock);

		if (!signal_pending(current)) {
			/*
			 * if this was our first attempt and client-core
			 * has not purged our operation, we are happy to
			 * simply wait
			 */
			spin_lock(&op->lock);
			if (op->attempts == 0 && !op_state_purged(op)) {
				spin_unlock(&op->lock);
				/* indefinite sleep; woken by downcall/purge */
				schedule();
			} else {
				spin_unlock(&op->lock);
				/*
				 * subsequent attempts, we retry exactly once
				 * with timeouts
				 */
				if (!schedule_timeout(MSECS_TO_JIFFIES
				      (1000 * op_timeout_secs))) {
					gossip_debug(GOSSIP_WAIT_DEBUG,
						     "*** %s:"
						     " operation timed out (tag"
						     " %llu, %p, att %d)\n",
						     __func__,
						     llu(op->tag),
						     op,
						     op->attempts);
					ret = -ETIMEDOUT;
					orangefs_clean_up_interrupted_operation
					    (op);
					break;
				}
			}
			spin_lock(&op->lock);
			op->attempts++;
			/*
			 * if the operation was purged in the meantime, it
			 * is better to requeue it afresh but ensure that
			 * we have not been purged repeatedly. This could
			 * happen if client-core crashes when an op
			 * is being serviced, so we requeue the op, client
			 * core crashes again so we requeue the op, client
			 * core starts, and so on...
			 */
			if (op_state_purged(op)) {
				ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
					 -EAGAIN :
					 -EIO;
				spin_unlock(&op->lock);
				gossip_debug(GOSSIP_WAIT_DEBUG,
					     "*** %s:"
					     " operation purged (tag "
					     "%llu, %p, att %d)\n",
					     __func__,
					     llu(op->tag),
					     op,
					     op->attempts);
				orangefs_clean_up_interrupted_operation(op);
				break;
			}
			spin_unlock(&op->lock);
			continue;
		}

		/* a signal arrived while we slept: abandon the wait */
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "*** %s:"
			     " operation interrupted by a signal (tag "
			     "%llu, op %p)\n",
			     __func__,
			     llu(op->tag),
			     op);
		orangefs_clean_up_interrupted_operation(op);
		ret = -EINTR;
		break;
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * similar to wait_for_matching_downcall(), but used in the special case
 * of I/O cancellations.
 *
 * Note we need a special wait function because if this is called we already
 * know that a signal is pending in current and need to service the
 * cancellation upcall anyway. the only way to exit this is to either
 * timeout or have the cancellation be serviced properly.
 */
|
2016-01-22 11:58:58 +08:00
|
|
|
static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
{
	int ret = -EINVAL;
	DEFINE_WAIT(wait_entry);

	/*
	 * NOTE: the loop body unconditionally breaks at its end, so this
	 * "while (1)" makes at most one pass: one timed wait, then either
	 * serviced, interrupted, or timed out.
	 */
	while (1) {
		spin_lock(&op->lock);
		prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
		if (op_state_serviced(op)) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:op-state is SERVICED.\n",
				     __func__);
			spin_unlock(&op->lock);
			ret = 0;
			break;
		}
		spin_unlock(&op->lock);

		if (signal_pending(current)) {
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:operation interrupted by a signal (tag"
				     " %llu, op %p)\n",
				     __func__,
				     llu(op->tag),
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -EINTR;
			break;
		}

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:About to call schedule_timeout.\n",
			     __func__);
		ret =
		    schedule_timeout(MSECS_TO_JIFFIES(1000 * op_timeout_secs));

		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:Value returned from schedule_timeout(%d).\n",
			     __func__,
			     ret);
		if (!ret) {
			/* timer expired with the op still unserviced */
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s:*** operation timed out: %p\n",
				     __func__,
				     op);
			orangefs_clean_up_interrupted_operation(op);
			ret = -ETIMEDOUT;
			break;
		}

		/*
		 * Woken before the timeout but not serviced and no signal:
		 * still treated as a timeout (see NOTE above the loop).
		 */
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:Breaking out of loop, regardless of value returned by schedule_timeout.\n",
			     __func__);
		ret = -ETIMEDOUT;
		break;
	}

	spin_lock(&op->lock);
	finish_wait(&op->waitq, &wait_entry);
	spin_unlock(&op->lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s:returning ret(%d)\n",
		     __func__,
		     ret);

	return ret;
}
|