drm/i915: Rename struct intel_ringbuffer to struct intel_ring
The state stored in this struct is not only the information about the
buffer object, but the ring used to communicate with the hardware. Using
buffer here is overly specific and, for me at least, conflates with the
notion of buffer objects themselves.

s/struct intel_ringbuffer/struct intel_ring/
s/enum intel_ring_hangcheck/enum intel_engine_hangcheck/
s/describe_ctx_ringbuf()/describe_ctx_ring()/
s/intel_ring_get_active_head()/intel_engine_get_active_head()/
s/intel_ring_sync_index()/intel_engine_sync_index()/
s/intel_ring_init_seqno()/intel_engine_init_seqno()/
s/ring_stuck()/engine_stuck()/
s/intel_cleanup_engine()/intel_engine_cleanup()/
s/intel_stop_engine()/intel_engine_stop()/
s/intel_pin_and_map_ringbuffer_obj()/intel_pin_and_map_ring()/
s/intel_unpin_ringbuffer()/intel_unpin_ring()/
s/intel_engine_create_ringbuffer()/intel_engine_create_ring()/
s/intel_ring_flush_all_caches()/intel_engine_flush_all_caches()/
s/intel_ring_invalidate_all_caches()/intel_engine_invalidate_all_caches()/
s/intel_ringbuffer_free()/intel_ring_free()/

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-15-git-send-email-chris@chris-wilson.co.uk
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-4-git-send-email-chris@chris-wilson.co.uk
parent dca33ecc5f
commit 7e37f889b5
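The patch itself is the mechanical substitution listed above; only the type and a handful of engine-level helpers are renamed, while the dword-emission helpers keep their intel_ring_ prefix because they really do operate on the ring. As a minimal illustrative sketch (not code from this patch; emit_noop is a hypothetical emitter, the helpers it calls are the existing ones visible in the diff), a typical call site after the rename looks like:

static int emit_noop(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;	/* was: struct intel_ringbuffer * */
	int ret;

	/* reserve two dwords of ring space for this request */
	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	/* the emit/advance helpers keep the intel_ring_ prefix */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}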
@@ -1419,7 +1419,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	intel_runtime_pm_get(dev_priv);
 
 	for_each_engine_id(engine, dev_priv, id) {
-		acthd[id] = intel_ring_get_active_head(engine);
+		acthd[id] = intel_engine_get_active_head(engine);
 		seqno[id] = intel_engine_get_seqno(engine);
 	}
 
@@ -2036,12 +2036,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void describe_ctx_ringbuf(struct seq_file *m,
-				 struct intel_ringbuffer *ringbuf)
+static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 {
 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
-		   ringbuf->space, ringbuf->head, ringbuf->tail,
-		   ringbuf->last_retired_head);
+		   ring->space, ring->head, ring->tail,
+		   ring->last_retired_head);
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
@@ -2086,7 +2085,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			if (ce->state)
 				describe_obj(m, ce->state);
 			if (ce->ring)
-				describe_ctx_ringbuf(m, ce->ring);
+				describe_ctx_ring(m, ce->ring);
 			seq_putc(m, '\n');
 		}
 
@@ -518,7 +518,7 @@ struct drm_i915_error_state {
 		bool waiting;
 		int num_waiters;
 		int hangcheck_score;
-		enum intel_ring_hangcheck_action hangcheck_action;
+		enum intel_engine_hangcheck_action hangcheck_action;
 		int num_requests;
 
 		/* our own tracking of ring head and tail */
@@ -894,7 +894,7 @@ struct i915_gem_context {
 
 	struct intel_context {
 		struct drm_i915_gem_object *state;
-		struct intel_ringbuffer *ring;
+		struct intel_ring *ring;
 		struct i915_vma *lrc_vma;
 		uint32_t *lrc_reg_state;
 		u64 lrc_desc;
@@ -2486,7 +2486,7 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 
 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *buffer;
+	struct intel_ring *ring;
 
 	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
@@ -2502,7 +2502,7 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	 * (lockless) lookup doesn't try and wait upon the request as we
 	 * reset it.
 	 */
-	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+	intel_engine_init_seqno(engine, engine->last_submitted_seqno);
 
 	/*
 	 * Clear the execlists queue up before freeing the requests, as those
@@ -2541,9 +2541,9 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	 * upon reset is less than when we start. Do one more pass over
 	 * all the ringbuffers to reset last_retired_head.
 	 */
-	list_for_each_entry(buffer, &engine->buffers, link) {
-		buffer->last_retired_head = buffer->tail;
-		intel_ring_update_space(buffer);
+	list_for_each_entry(ring, &engine->buffers, link) {
+		ring->last_retired_head = ring->tail;
+		intel_ring_update_space(ring);
 	}
 
 	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
@@ -2870,7 +2870,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 		i915_gem_object_retire_request(obj, from_req);
 	} else {
-		int idx = intel_ring_sync_index(from, to);
+		int idx = intel_engine_sync_index(from, to);
 		u32 seqno = i915_gem_request_get_seqno(from_req);
 
 		WARN_ON(!to_req);
@@ -4570,8 +4570,8 @@ int i915_gem_init(struct drm_device *dev)
 
 	if (!i915.enable_execlists) {
 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
-		dev_priv->gt.stop_engine = intel_stop_engine;
+		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
+		dev_priv->gt.stop_engine = intel_engine_stop;
 	} else {
 		dev_priv->gt.execbuf_submit = intel_execlists_submission;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
@@ -174,7 +174,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
 		WARN_ON(ce->pin_count);
 		if (ce->ring)
-			intel_ringbuffer_free(ce->ring);
+			intel_ring_free(ce->ring);
 
 		i915_gem_object_put(ce->state);
 	}
@@ -552,7 +552,7 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
@@ -655,7 +655,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int i, ret;
 
 	if (!remap_info)
@@ -1001,7 +1001,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(req);
+	return intel_engine_invalidate_all_caches(req);
 }
 
 static bool
@@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret, i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
@@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ringbuffer *ring = params->request->ring;
+		struct intel_ring *ring = params->request->ring;
 
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
@@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1661,7 +1661,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -244,7 +244,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
 	/* Finally reset hw state */
 	for_each_engine(engine, dev_priv)
-		intel_ring_init_seqno(engine, seqno);
+		intel_engine_init_seqno(engine, seqno);
 
 	return 0;
 }
@@ -423,7 +423,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
			bool flush_caches)
 {
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 request_start;
 	u32 reserved_tail;
 	int ret;
@@ -454,7 +454,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		if (i915.enable_execlists)
 			ret = logical_ring_flush_all_caches(request);
 		else
-			ret = intel_ring_flush_all_caches(request);
+			ret = intel_engine_flush_all_caches(request);
 		/* Not allowed to fail! */
 		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
@@ -61,7 +61,7 @@ struct drm_i915_gem_request {
	 */
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	struct intel_signal_node signaling;
 
 	/** GEM sequence number associated with the previous request,
@@ -221,7 +221,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 	}
 }
 
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
 {
 	switch (a) {
 	case HANGCHECK_IDLE:
@@ -879,7 +879,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
 	signal_offset =
 		(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
 	tmp = error->semaphore_obj->pages[0];
-	idx = intel_ring_sync_index(engine, to);
+	idx = intel_engine_sync_index(engine, to);
 
 	ee->semaphore_mboxes[idx] = tmp[signal_offset];
 	ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
@@ -981,7 +981,7 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 
 	ee->waiting = intel_engine_has_waiter(engine);
 	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
-	ee->acthd = intel_ring_get_active_head(engine);
+	ee->acthd = intel_engine_get_active_head(engine);
 	ee->seqno = intel_engine_get_seqno(engine);
 	ee->last_seqno = engine->last_submitted_seqno;
 	ee->start = I915_READ_START(engine);
@@ -1097,7 +1097,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct i915_address_space *vm;
-			struct intel_ringbuffer *ring;
+			struct intel_ring *ring;
 
 			vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;
@@ -2993,7 +2993,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
 	return stuck;
 }
 
-static enum intel_ring_hangcheck_action
+static enum intel_engine_hangcheck_action
 head_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
 	if (acthd != engine->hangcheck.acthd) {
@@ -3011,11 +3011,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 	return HANGCHECK_HUNG;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *engine, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	enum intel_ring_hangcheck_action ha;
+	enum intel_engine_hangcheck_action ha;
 	u32 tmp;
 
 	ha = head_stuck(engine, acthd);
@@ -3124,7 +3124,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		if (engine->irq_seqno_barrier)
 			engine->irq_seqno_barrier(engine);
 
-		acthd = intel_ring_get_active_head(engine);
+		acthd = intel_engine_get_active_head(engine);
 		seqno = intel_engine_get_seqno(engine);
 
 		/* Reset stuck interrupts between batch advances */
@@ -3154,8 +3154,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
-				engine->hangcheck.action = ring_stuck(engine,
-								      acthd);
+				engine->hangcheck.action =
+					engine_stuck(engine, acthd);
 
				switch (engine->hangcheck.action) {
				case HANGCHECK_IDLE:
@@ -11115,7 +11115,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11149,7 +11149,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11180,7 +11180,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11218,7 +11218,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11253,7 +11253,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -155,7 +155,7 @@ cleanup:
 		if (i915.enable_execlists)
 			intel_logical_ring_cleanup(&dev_priv->engine[i]);
 		else
-			intel_cleanup_engine(&dev_priv->engine[i]);
+			intel_engine_cleanup(&dev_priv->engine[i]);
 	}
 
 	return ret;
@@ -767,7 +767,7 @@ err_unpin:
 static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 
 	intel_ring_advance(ring);
@@ -818,7 +818,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	struct drm_device *dev = params->dev;
 	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ring = params->request->ring;
+	struct intel_ring *ring = params->request->ring;
 	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
@@ -973,7 +973,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring);
+	ret = intel_pin_and_map_ring(dev_priv, ce->ring);
 	if (ret)
 		goto unpin_map;
 
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ringbuffer_obj(ce->ring);
+	intel_unpin_ring(ce->ring);
 
 	i915_gem_object_unpin_map(ce->state);
 	i915_gem_object_ggtt_unpin(ce->state);
@@ -1027,7 +1027,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
@@ -1550,7 +1550,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
@@ -1578,7 +1578,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1635,8 +1635,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
			   u32 invalidate_domains,
			   u32 unused)
 {
-	struct intel_ringbuffer *ring = request->ring;
-	uint32_t cmd;
+	struct intel_ring *ring = request->ring;
+	u32 cmd;
 	int ret;
 
 	ret = intel_ring_begin(request, 4);
@@ -1673,7 +1673,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
				  u32 invalidate_domains,
				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
@@ -1787,7 +1787,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1810,7 +1810,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
@@ -2162,7 +2162,7 @@ static int
 populate_lr_context(struct i915_gem_context *ctx,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
-		    struct intel_ringbuffer *ringbuf)
+		    struct intel_ring *ring)
 {
 	struct drm_i915_private *dev_priv = ctx->i915;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
@@ -2215,7 +2215,7 @@ populate_lr_context(struct i915_gem_context *ctx,
		       RING_START(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
		       RING_CTL(engine->mmio_base),
-		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
		       RING_BBADDR_UDW(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2343,7 +2343,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct drm_i915_gem_object *ctx_obj;
 	struct intel_context *ce = &ctx->engine[engine->id];
 	uint32_t context_size;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(ce->state);
@@ -2359,7 +2359,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		return PTR_ERR(ctx_obj);
 	}
 
-	ring = intel_engine_create_ringbuffer(engine, ctx->ring_size);
+	ring = intel_engine_create_ring(engine, ctx->ring_size);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
@@ -2378,7 +2378,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	return 0;
 
 error_ring_free:
-	intel_ringbuffer_free(ring);
+	intel_ring_free(ring);
 error_deref_obj:
 	i915_gem_object_put(ctx_obj);
 	ce->ring = NULL;
@@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
 	int ret;
@@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	unsigned int i;
 	int ret;
 
@@ -235,7 +235,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(overlay->active);
@@ -270,7 +270,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -340,7 +340,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -426,7 +426,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ringbuffer *ring;
+		struct intel_ring *ring;
 
 		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req))
@@ -47,7 +47,7 @@ int __intel_ring_space(int head, int tail, int size)
 	return space - I915_RING_FREE_SPACE;
 }
 
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+void intel_ring_update_space(struct intel_ring *ringbuf)
 {
 	if (ringbuf->last_retired_head != -1) {
 		ringbuf->head = ringbuf->last_retired_head;
@@ -60,9 +60,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
 
 static void __intel_engine_submit(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	ringbuf->tail &= ringbuf->size - 1;
-	engine->write_tail(engine, ringbuf->tail);
+	struct intel_ring *ring = engine->buffer;
+
+	ring->tail &= ring->size - 1;
+	engine->write_tail(engine, ring->tail);
 }
 
 static int
@@ -70,7 +71,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -97,7 +98,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -187,7 +188,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -224,7 +225,7 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -277,7 +278,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -299,7 +300,7 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -364,7 +365,7 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -427,7 +428,7 @@ static void ring_write_tail(struct intel_engine_cs *engine,
 	I915_WRITE_TAIL(engine, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	u64 acthd;
@@ -553,8 +554,8 @@ static bool stop_ring(struct intel_engine_cs *engine)
 static int init_ring_common(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	struct drm_i915_gem_object *obj = ringbuf->obj;
+	struct intel_ring *ring = engine->buffer;
+	struct drm_i915_gem_object *obj = ring->obj;
 	int ret = 0;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -604,7 +605,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	(void)I915_READ_HEAD(engine);
 
 	I915_WRITE_CTL(engine,
-			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -623,10 +624,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
 		goto out;
 	}
 
-	ringbuf->last_retired_head = -1;
-	ringbuf->head = I915_READ_HEAD(engine);
-	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
-	intel_ring_update_space(ringbuf);
+	ring->last_retired_head = -1;
+	ring->head = I915_READ_HEAD(engine);
+	ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
+	intel_ring_update_space(ring);
 
 	intel_engine_init_hangcheck(engine);
 
@@ -680,7 +681,7 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 	int ret, i;
 
@@ -688,7 +689,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 		return 0;
 
 	req->engine->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(req);
+	ret = intel_engine_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -706,7 +707,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->engine->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(req);
+	ret = intel_engine_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -1338,7 +1339,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1380,7 +1381,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1419,7 +1420,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
		       unsigned int num_dwords)
 {
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
@@ -1464,7 +1465,7 @@ static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1488,7 +1489,7 @@ static int
 gen8_render_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1533,7 +1534,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ring;
+	struct intel_ring *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
 	struct i915_hw_ppgtt *ppgtt;
@@ -1567,7 +1568,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ring;
+	struct intel_ring *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
@@ -1701,7 +1702,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
	       u32 invalidate_domains,
	       u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1717,7 +1718,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -1795,7 +1796,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 length,
			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1822,7 +1823,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
@@ -1884,7 +1885,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1992,7 +1993,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ring(struct intel_ring *ringbuf)
 {
 	GEM_BUG_ON(!ringbuf->vma);
 	GEM_BUG_ON(!ringbuf->vaddr);
@@ -2007,8 +2008,8 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 	ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-				     struct intel_ringbuffer *ringbuf)
+int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
+			   struct intel_ring *ringbuf)
 {
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -2060,14 +2061,14 @@ err_unpin:
 	return ret;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
 {
 	i915_gem_object_put(ringbuf->obj);
 	ringbuf->obj = NULL;
 }
 
 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ringbuffer *ringbuf)
+				      struct intel_ring *ringbuf)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -2087,10 +2088,10 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 {
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -2128,7 +2129,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 }
 
 void
-intel_ringbuffer_free(struct intel_ringbuffer *ring)
+intel_ring_free(struct intel_ring *ring)
 {
 	intel_destroy_ringbuffer_obj(ring);
 	list_del(&ring->link);
@@ -2189,7 +2190,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ring *ringbuf;
 	int ret;
 
 	WARN_ON(engine->buffer);
@@ -2214,7 +2215,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	if (ret)
 		goto error;
 
-	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
+	ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
 		goto error;
@@ -2232,7 +2233,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 		goto error;
 	}
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+	ret = intel_pin_and_map_ring(dev_priv, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
			  engine->name, ret);
@@ -2243,11 +2244,11 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	return 0;
 
 error:
-	intel_cleanup_engine(engine);
+	intel_engine_cleanup(engine);
 	return ret;
 }
 
-void intel_cleanup_engine(struct intel_engine_cs *engine)
+void intel_engine_cleanup(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
 
@@ -2257,11 +2258,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	dev_priv = engine->i915;
 
 	if (engine->buffer) {
-		intel_stop_engine(engine);
+		intel_engine_stop(engine);
 		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-		intel_unpin_ringbuffer_obj(engine->buffer);
-		intel_ringbuffer_free(engine->buffer);
+		intel_unpin_ring(engine->buffer);
+		intel_ring_free(engine->buffer);
 		engine->buffer = NULL;
 	}
 
@@ -2324,7 +2325,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 
@@ -2369,7 +2370,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
 	int remain_usable = ring->effective_size - ring->tail;
 	int bytes = num_dwords * sizeof(u32);
@@ -2426,7 +2427,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int num_dwords =
 		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
@@ -2447,7 +2448,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
@@ -2533,7 +2534,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
			       u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2579,7 +2580,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
		     !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
@@ -2605,7 +2606,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			     u64 offset, u32 len,
			     unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2630,7 +2631,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2653,7 +2654,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
			   u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2970,7 +2971,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 }
 
 int
-intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
+intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
@@ -2989,7 +2990,7 @@ intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 }
 
 int
-intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
+intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	uint32_t flush_domains;
@@ -3009,8 +3010,7 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-void
-intel_stop_engine(struct intel_engine_cs *engine)
+void intel_engine_stop(struct intel_engine_cs *engine)
 {
 	int ret;
 
@@ -62,7 +62,7 @@ struct intel_hw_status_page {
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
@@ -72,17 +72,17 @@ enum intel_ring_hangcheck_action {
 
 #define HANGCHECK_SCORE_RING_HUNG 31
 
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
	u64 acthd;
	unsigned long user_interrupts;
	u32 seqno;
	int score;
-	enum intel_ring_hangcheck_action action;
+	enum intel_engine_hangcheck_action action;
	int deadlock;
	u32 instdone[I915_NUM_INSTDONE_REG];
 };
 
-struct intel_ringbuffer {
+struct intel_ring {
	struct drm_i915_gem_object *obj;
	void *vaddr;
	struct i915_vma *vma;
@@ -149,7 +149,7 @@ struct intel_engine_cs {
	u64 fence_context;
	u32 mmio_base;
	unsigned int irq_shift;
-	struct intel_ringbuffer *buffer;
+	struct intel_ring *buffer;
	struct list_head buffers;
 
	/* Rather than have every client wait upon all user interrupts,
@@ -329,7 +329,7 @@ struct intel_engine_cs {
 
	struct i915_gem_context *last_context;
 
-	struct intel_ring_hangcheck hangcheck;
+	struct intel_engine_hangcheck hangcheck;
 
	struct {
		struct drm_i915_gem_object *obj;
@@ -376,8 +376,8 @@ intel_engine_flag(const struct intel_engine_cs *engine)
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *engine,
-		      struct intel_engine_cs *other)
+intel_engine_sync_index(struct intel_engine_cs *engine,
			struct intel_engine_cs *other)
 {
	int idx;
 
@@ -439,45 +439,44 @@ intel_write_status_page(struct intel_engine_cs *engine,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-				     struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-void intel_ringbuffer_free(struct intel_ringbuffer *ring);
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
+			   struct intel_ring *ring);
+void intel_unpin_ring(struct intel_ring *ring);
+void intel_ring_free(struct intel_ring *ring);
 
-void intel_stop_engine(struct intel_engine_cs *engine);
-void intel_cleanup_engine(struct intel_engine_cs *engine);
+void intel_engine_stop(struct intel_engine_cs *engine);
+void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data)
+static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
 {
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
 }
 
-static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
-				       i915_reg_t reg)
+static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
 {
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
 }
 
-static inline void intel_ring_advance(struct intel_ringbuffer *ring)
+static inline void intel_ring_advance(struct intel_ring *ring)
 {
	ring->tail &= ring->size - 1;
 }
 
 int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
+void intel_ring_update_space(struct intel_ring *ringbuf);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
@@ -491,7 +490,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
@@ -499,7 +498,7 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
+static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf)
 {
	return ringbuf->tail;
 }