s390/qdio: remove internal polling in non-thinint path
For non-thinint devices in LPAR, qdio polls an idle Input Queue for a
little while to catch more work. But platform support for thinints has
been around practically _forever_ by now, so this micro-optimization is
seeing 0 actual use. Remove it to reduce the overall complexity of the
hot path.

In the meantime we also grew support for driver-level polling (eg. NAPI
in qeth), so it's quite questionable how useful this would actually be
on current kernels.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
commit a709423f7a
parent 2bbf282a5e
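As context for the hunks below: the removed heuristic keyed off the s390
TOD clock, where 1 microsecond corresponds to (1 << 12) clock units
(hence thresholds written as (500 << 12)). Consolidated from the removed
lines, the pattern was roughly this sketch (not a drop-in excerpt; the
surrounding driver context is omitted):

	/* After a queue scan found new work on a non-thinint LPAR device,
	 * remember when data last arrived:
	 */
	if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
		q->u.in.timestamp = get_tod_clock();

	/* Later, only report the idle queue as "done" once it has stayed
	 * quiet for QDIO_INPUT_THRESHOLD (500 microseconds):
	 */
	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD)
		return 1;	/* idle long enough, stop scanning */
	else
		return 0;	/* keep polling for more work */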
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -15,7 +15,6 @@
 #define QDIO_BUSY_BIT_PATIENCE		(100 << 12)	/* 100 microseconds */
 #define QDIO_BUSY_BIT_RETRY_DELAY	10		/* 10 milliseconds */
 #define QDIO_BUSY_BIT_RETRIES		1000		/* = 10s retry time */
-#define QDIO_INPUT_THRESHOLD		(500 << 12)	/* 500 microseconds */
 
 enum qdio_irq_states {
 	QDIO_IRQ_STATE_INACTIVE,
@@ -181,8 +180,6 @@ struct qdio_input_q {
 	/* Batch of SBALs that we processed while polling the queue: */
 	unsigned int batch_start;
 	unsigned int batch_count;
-	/* last time of noticing incoming data */
-	u64 timestamp;
 };
 
 struct qdio_output_q {
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -510,14 +510,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
 
 static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
 {
-	int count;
-
-	count = get_inbound_buffer_frontier(q, start);
-
-	if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
-		q->u.in.timestamp = get_tod_clock();
-
-	return count;
+	return get_inbound_buffer_frontier(q, start);
 }
 
 static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
@@ -535,22 +528,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
 		/* more work coming */
 		return 0;
 
-	if (is_thinint_irq(q->irq_ptr))
-		return 1;
-
-	/* don't poll under z/VM */
-	if (MACHINE_IS_VM)
-		return 1;
-
-	/*
-	 * At this point we know, that inbound first_to_check
-	 * has (probably) not moved (see qdio_inbound_processing).
-	 */
-	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
-		return 1;
-	} else
-		return 0;
+	return 1;
 }
 
 static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
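The net effect on the two touched functions: qdio_inbound_q_moved()
collapses to a plain wrapper, and the tail of qdio_inbound_q_done()
becomes an unconditional return 1. A sketch of the post-patch shape,
assembled from the + lines above:

static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
	/* no more timestamp bookkeeping for idle-queue polling */
	return get_inbound_buffer_frontier(q, start);
}

With the internal poll loop gone, a queue with no primed buffers is
simply reported as done, regardless of interrupt type or hypervisor; any
further low-latency polling is left to the driver level (eg. NAPI in
qeth), as the commit message notes.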