s390/qdio: remove remaining tasklet & timer code
Both qdio drivers have moved away from using qdio's internal tasklet and timer mechanisms for Output Queues. Rip out all the leftovers.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Reviewed-by: Benjamin Block <bblock@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent d1ea9b58c8
commit d01fad2c6a
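For context: after this change, completion scanning for Output Queues is entirely the upper-layer driver's job. Below is a minimal, hypothetical sketch of such a driver-side poll using qdio_inspect_queue(), which this patch keeps; the names my_card, MY_TX_QUEUE and my_complete_tx_buffers() are illustrative only, and the return-value semantics are assumed to mirror the qdio_get_next_buffers() interface removed here (number of completed buffers, < 0 on error).

#include <asm/qdio.h>

#define MY_TX_QUEUE	0	/* hypothetical: first Output Queue */

struct my_card {		/* hypothetical driver state */
	struct ccw_device *cdev;
};

/* hypothetical: driver-specific TX completion handling */
static void my_complete_tx_buffers(struct my_card *card, unsigned int bufnr,
				   int count, unsigned int error)
{
}

static void my_scan_tx_completions(struct my_card *card)
{
	unsigned int bufnr = 0, error = 0;
	int count;

	/*
	 * Assumed to return the number of completed buffers on the Output
	 * Queue, or < 0 on error (mirroring the removed
	 * qdio_get_next_buffers() semantics).
	 */
	count = qdio_inspect_queue(card->cdev, MY_TX_QUEUE, false,
				   &bufnr, &error);
	if (count <= 0)
		return;

	my_complete_tx_buffers(card, bufnr, count, error);
}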
@@ -337,7 +337,6 @@ struct qdio_initialize {
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
 	void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
-	unsigned int scan_threshold;
 	unsigned long int_parm;
 	struct qdio_buffer ***input_sbal_addr_array;
 	struct qdio_buffer ***output_sbal_addr_array;
@@ -350,7 +349,6 @@ struct qdio_initialize {
 
 #define QDIO_FLAG_SYNC_INPUT	0x01
 #define QDIO_FLAG_SYNC_OUTPUT	0x02
-#define QDIO_FLAG_PCI_OUT	0x10
 
 int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
 void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
@@ -367,7 +365,6 @@ extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
 		   unsigned int bufnr, unsigned int count, struct qaob *aob);
 extern int qdio_start_irq(struct ccw_device *cdev);
 extern int qdio_stop_irq(struct ccw_device *cdev);
-extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
 extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
 			      bool is_input, unsigned int *bufnr,
 			      unsigned int *error);
@@ -138,9 +138,6 @@ struct siga_flag {
 struct qdio_dev_perf_stat {
 	unsigned int adapter_int;
 	unsigned int qdio_int;
-	unsigned int pci_request_int;
-
-	unsigned int tasklet_outbound;
 
 	unsigned int siga_read;
 	unsigned int siga_write;
@@ -150,7 +147,6 @@ struct qdio_dev_perf_stat {
 	unsigned int stop_polling;
 	unsigned int inbound_queue_full;
 	unsigned int outbound_call;
-	unsigned int outbound_handler;
 	unsigned int outbound_queue_full;
 	unsigned int fast_requeue;
 	unsigned int target_full;
@@ -180,12 +176,6 @@ struct qdio_input_q {
 };
 
 struct qdio_output_q {
-	/* PCIs are enabled for the queue */
-	int pci_out_enabled;
-	/* timer to check for more outbound work */
-	struct timer_list timer;
-	/* tasklet to check for completions */
-	struct tasklet_struct tasklet;
 };
 
 /*
@@ -263,7 +253,6 @@ struct qdio_irq {
 	struct qdio_ssqd_desc ssqd_desc;
 	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
 
-	unsigned int scan_threshold;	/* used SBALs before tasklet schedule */
 	int perf_stat_enabled;
 
 	struct qdr *qdr;
@@ -360,8 +349,6 @@ void qdio_thinint_exit(void);
 int test_nonshared_ind(struct qdio_irq *);
 
 /* prototypes for setup */
-void qdio_outbound_tasklet(struct tasklet_struct *t);
-void qdio_outbound_timer(struct timer_list *t);
 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		      struct irb *irb);
 int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
@@ -197,8 +197,6 @@ DEFINE_SHOW_ATTRIBUTE(ssqd);
 static char *qperf_names[] = {
 	"Assumed adapter interrupts",
 	"QDIO interrupts",
-	"Requested PCIs",
-	"Outbound tasklet runs",
 	"SIGA read",
 	"SIGA write",
 	"SIGA sync",
@@ -206,7 +204,6 @@ static char *qperf_names[] = {
 	"Inbound stop_polling",
 	"Inbound queue full",
 	"Outbound calls",
-	"Outbound handler",
 	"Outbound queue full",
 	"Outbound fast_requeue",
 	"Outbound target_full",
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -373,18 +372,6 @@ static inline int qdio_siga_input(struct qdio_q *q)
 	return (cc) ? -EIO : 0;
 }
 
-#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
-#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
-
-static inline void qdio_sync_queues(struct qdio_q *q)
-{
-	/* PCI capable outbound queues will also be scanned so sync them too */
-	if (pci_out_supported(q->irq_ptr))
-		qdio_siga_sync_all(q);
-	else
-		qdio_siga_sync_q(q);
-}
-
 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 			unsigned char *state)
 {
@@ -521,15 +508,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
 	return 1;
 }
 
-static inline int qdio_tasklet_schedule(struct qdio_q *q)
-{
-	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
-		tasklet_schedule(&q->u.out.tasklet);
-		return 0;
-	}
-	return -EPERM;
-}
-
 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 					unsigned int *error)
 {
@@ -595,12 +573,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 	}
 }
 
-/* all buffers processed? */
-static inline int qdio_outbound_q_done(struct qdio_q *q)
-{
-	return atomic_read(&q->nr_buf_used) == 0;
-}
-
 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
 				unsigned long aob)
 {
@@ -644,75 +616,6 @@ retry:
 	return cc;
 }
 
-void qdio_outbound_tasklet(struct tasklet_struct *t)
-{
-	struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
-	struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
-	unsigned int start = q->first_to_check;
-	unsigned int error = 0;
-	int count;
-
-	qperf_inc(q, tasklet_outbound);
-	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
-
-	count = get_outbound_buffer_frontier(q, start, &error);
-	if (count) {
-		q->first_to_check = add_buf(start, count);
-
-		if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
-			qperf_inc(q, outbound_handler);
-			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
-				      start, count);
-
-			q->handler(q->irq_ptr->cdev, error, q->nr, start,
-				   count, q->irq_ptr->int_parm);
-		}
-	}
-
-	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
-	    !qdio_outbound_q_done(q))
-		goto sched;
-
-	if (q->u.out.pci_out_enabled)
-		return;
-
-	/*
-	 * Now we know that queue type is either qeth without pci enabled
-	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
-	 * is noticed and outbound_handler is called after some time.
-	 */
-	if (qdio_outbound_q_done(q))
-		del_timer_sync(&q->u.out.timer);
-	else
-		if (!timer_pending(&q->u.out.timer) &&
-		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
-	return;
-
-sched:
-	qdio_tasklet_schedule(q);
-}
-
-void qdio_outbound_timer(struct timer_list *t)
-{
-	struct qdio_q *q = from_timer(q, t, u.out.timer);
-
-	qdio_tasklet_schedule(q);
-}
-
-static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
-{
-	struct qdio_q *out;
-	int i;
-
-	if (!pci_out_supported(irq) || !irq->scan_threshold)
-		return;
-
-	for_each_output_queue(irq, out, i)
-		if (!qdio_outbound_q_done(out))
-			qdio_tasklet_schedule(out);
-}
-
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
 				  enum qdio_irq_states state)
 {
@@ -734,25 +637,11 @@ static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
 /* PCI interrupt handler */
 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 {
-	int i;
-	struct qdio_q *q;
-
 	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
 	qdio_deliver_irq(irq_ptr);
 	irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
-
-	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
-		return;
-
-	for_each_output_queue(irq_ptr, q, i) {
-		if (qdio_outbound_q_done(q))
-			continue;
-		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
-			qdio_siga_sync_q(q);
-		qdio_tasklet_schedule(q);
-	}
 }
 
 static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
@@ -879,17 +768,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
-{
-	struct qdio_q *q;
-	int i;
-
-	for_each_output_queue(irq_ptr, q, i) {
-		del_timer_sync(&q->u.out.timer);
-		tasklet_kill(&q->u.out.tasklet);
-	}
-}
-
 static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
 {
 	struct ccw_device *cdev = irq->cdev;
@@ -949,12 +827,10 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
 	}
 
 	/*
-	 * Indicate that the device is going down. Scheduling the queue
-	 * tasklets is forbidden from here on.
+	 * Indicate that the device is going down.
 	 */
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-	qdio_shutdown_queues(irq_ptr);
 	qdio_shutdown_debug_entries(irq_ptr);
 
 	rc = qdio_cancel_ccw(irq_ptr, how);
@@ -1238,12 +1114,10 @@ EXPORT_SYMBOL_GPL(qdio_activate);
 /**
  * handle_inbound - reset processed input buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are emptied
  */
-static int handle_inbound(struct qdio_q *q, unsigned int callflags,
-			  int bufnr, int count)
+static int handle_inbound(struct qdio_q *q, int bufnr, int count)
 {
 	int overlap;
 
@@ -1269,16 +1143,13 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 /**
  * handle_outbound - process filled outbound buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are filled
  * @aob: asynchronous operation block
  */
-static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-			   unsigned int bufnr, unsigned int count,
+static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
 			   struct qaob *aob)
 {
-	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
 	unsigned char state = 0;
 	int used, rc = 0;
 
@@ -1290,12 +1161,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 	if (used == QDIO_MAX_BUFFERS_PER_Q)
 		qperf_inc(q, outbound_queue_full);
 
-	if (callflags & QDIO_FLAG_PCI_OUT) {
-		q->u.out.pci_out_enabled = 1;
-		qperf_inc(q, pci_request_int);
-	} else
-		q->u.out.pci_out_enabled = 0;
-
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
 		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
 
@@ -1312,18 +1177,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		rc = qdio_kick_outbound_q(q, count, 0);
 	}
 
-	/* Let drivers implement their own completion scanning: */
-	if (!scan_threshold)
-		return rc;
-
-	/* in case of SIGA errors we must process the error immediately */
-	if (used >= scan_threshold || rc)
-		qdio_tasklet_schedule(q);
-	else
-		/* free the SBALs in case of no further traffic */
-		if (!timer_pending(&q->u.out.timer) &&
-		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-			mod_timer(&q->u.out.timer, jiffies + HZ);
 	return rc;
 }
 
@@ -1355,11 +1208,9 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 	if (!count)
 		return 0;
 	if (callflags & QDIO_FLAG_SYNC_INPUT)
-		return handle_inbound(irq_ptr->input_qs[q_nr],
-				      callflags, bufnr, count);
+		return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
 	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
-		return handle_outbound(irq_ptr->output_qs[q_nr],
-				       callflags, bufnr, count, aob);
+		return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
@@ -1446,45 +1297,6 @@ int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
 }
 EXPORT_SYMBOL_GPL(qdio_inspect_queue);
 
-/**
- * qdio_get_next_buffers - process input buffers
- * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
- * @bufnr: first filled buffer number
- * @error: buffers are in error state
- *
- * Return codes
- *   < 0 - error
- *   = 0 - no new buffers found
- *   > 0 - number of processed buffers
- */
-int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
-			  int *error)
-{
-	struct qdio_q *q;
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
-	if (!irq_ptr)
-		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
-
-	/*
-	 * Cannot rely on automatic sync after interrupt since queues may
-	 * also be examined without interrupt.
-	 */
-	if (need_siga_sync(q))
-		qdio_sync_queues(q);
-
-	qdio_check_outbound_pci_queues(irq_ptr);
-
-	/* Note: upper-layer MUST stop processing immediately here ... */
-	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
-		return -EIO;
-
-	return __qdio_inspect_queue(q, bufnr, error);
-}
-EXPORT_SYMBOL(qdio_get_next_buffers);
-
 /**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
@@ -267,9 +267,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
 		q->is_input_q = 0;
 		setup_storage_lists(q, irq_ptr,
 				    qdio_init->output_sbal_addr_array[i], i);
-
-		tasklet_setup(&q->u.out.tasklet, qdio_outbound_tasklet);
-		timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
 	}
 }
 
@@ -442,7 +439,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
 	irq_ptr->int_parm = init_data->int_parm;
 	irq_ptr->nr_input_qs = init_data->no_input_qs;
 	irq_ptr->nr_output_qs = init_data->no_output_qs;
-	irq_ptr->scan_threshold = init_data->scan_threshold;
 	ccw_device_get_schid(cdev, &irq_ptr->schid);
 	setup_queues(irq_ptr, init_data);
 