drm/i915: Preallocate requests
By allocating the request prior to writing to the ringbuffer, we can abort the operation without leaving the GPU in an inconsistent state.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
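The pattern is easiest to see outside the driver. Below is a minimal, self-contained C sketch of the idea the commit describes; the names (ring_request, add_request, do_operation) are illustrative stand-ins, not the i915 API. Everything that can fail is allocated before any commands are written, so an error path can simply free the request and back out, while the submit path consumes the preallocated request and also accepts NULL when a late, lazy allocation is still acceptable.

#include <stdlib.h>
#include <errno.h>

/* Hypothetical stand-in for drm_i915_gem_request. */
struct ring_request {
	unsigned int seqno;
};

/* Mirrors the reworked i915_add_request() contract: the caller may pass a
 * preallocated request, or NULL to fall back to allocating here. A return
 * of 0 means failure, matching the driver's "seqno 0" convention. */
static unsigned int add_request(struct ring_request *request)
{
	static unsigned int next_seqno = 1;
	unsigned int seqno;

	if (request == NULL) {
		request = calloc(1, sizeof(*request));
		if (request == NULL)
			return 0;
	}

	request->seqno = next_seqno++;
	seqno = request->seqno;

	/* The real driver queues the request and frees it when the GPU
	 * retires it; this sketch has no retire path, so free it here. */
	free(request);
	return seqno;
}

/* Allocate up front, before touching the (imaginary) ring buffer. */
static int do_operation(void)
{
	struct ring_request *request = calloc(1, sizeof(*request));

	if (request == NULL)
		return -ENOMEM;	/* abort cleanly: no ring writes have happened yet */

	/* ... emit commands to the ring buffer; must not fail past this point ... */

	(void)add_request(request);	/* consumes the preallocated request */
	return 0;
}

This is the reason i915_add_request() grows a request parameter in the diff below and only calls kzalloc() itself when passed NULL: once the ring has been written, the last remaining failure point has already been removed.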
commit 8dc5d14741
parent 5cd68c9864
drivers/gpu/drm/i915/i915_drv.h
@@ -992,8 +992,9 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 int i915_gpu_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 uint32_t i915_add_request(struct drm_device *dev,
-			  struct drm_file *file_priv,
-			  struct intel_ring_buffer *ring);
+			  struct drm_file *file_priv,
+			  struct drm_i915_gem_request *request,
+			  struct intel_ring_buffer *ring);
 int i915_do_wait_request(struct drm_device *dev,
 			 uint32_t seqno,
 			 bool interruptible,
drivers/gpu/drm/i915/i915_gem.c
@@ -1609,20 +1609,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 uint32_t
 i915_add_request(struct drm_device *dev,
 		 struct drm_file *file_priv,
+		 struct drm_i915_gem_request *request,
 		 struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *i915_file_priv = NULL;
-	struct drm_i915_gem_request *request;
 	uint32_t seqno;
 	int was_empty;
 
 	if (file_priv != NULL)
 		i915_file_priv = file_priv->driver_priv;
 
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
-		return 0;
+	if (request == NULL) {
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return 0;
+	}
 
 	seqno = ring->add_request(dev, ring, file_priv, 0);
 
@@ -1839,7 +1841,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	BUG_ON(seqno == 0);
 
 	if (seqno == dev_priv->next_seqno) {
-		seqno = i915_add_request(dev, NULL, ring);
+		seqno = i915_add_request(dev, NULL, NULL, ring);
 		if (seqno == 0)
 			return -ENOMEM;
 	}
@@ -3505,8 +3507,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 	return ret;
 }
 
-
-int
+static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv,
 		       struct drm_i915_gem_execbuffer2 *args,
@@ -3518,6 +3519,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_gem_relocation_entry *relocs = NULL;
+	struct drm_i915_gem_request *request = NULL;
 	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, reloc_index;
@@ -3571,6 +3573,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL) {
+		ret = -ENOMEM;
+		goto pre_mutex_err;
+	}
+
 	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
 					    &relocs);
 	if (ret != 0)
@@ -3736,11 +3744,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	if (dev_priv->render_ring.outstanding_lazy_request) {
-		(void)i915_add_request(dev, file_priv, &dev_priv->render_ring);
+		(void)i915_add_request(dev, file_priv, NULL, &dev_priv->render_ring);
 		dev_priv->render_ring.outstanding_lazy_request = false;
 	}
 	if (dev_priv->bsd_ring.outstanding_lazy_request) {
-		(void)i915_add_request(dev, file_priv, &dev_priv->bsd_ring);
+		(void)i915_add_request(dev, file_priv, NULL, &dev_priv->bsd_ring);
 		dev_priv->bsd_ring.outstanding_lazy_request = false;
 	}
 
@@ -3810,7 +3818,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * *some* interrupts representing completion of buffers that we can
 	 * wait on when trying to clear up gtt space).
 	 */
-	seqno = i915_add_request(dev, file_priv, ring);
+	seqno = i915_add_request(dev, file_priv, request, ring);
+	request = NULL;
 
 #if WATCH_LRU
 	i915_dump_lru(dev, __func__);
@@ -3849,6 +3858,7 @@ pre_mutex_err:
 
 	drm_free_large(object_list);
 	kfree(cliprects);
+	kfree(request);
 
 	return ret;
 }
@@ -4199,7 +4209,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 */
 	if (obj->write_domain) {
 		i915_gem_flush(dev, 0, obj->write_domain);
-		(void)i915_add_request(dev, file_priv, obj_priv->ring);
+		(void)i915_add_request(dev, file_priv, NULL, obj_priv->ring);
 	}
 
 	/* Update the active list for the hardware's current position.
drivers/gpu/drm/i915/intel_overlay.c
@@ -218,6 +218,7 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+					 struct drm_i915_gem_request *request,
 					 bool interruptible,
 					 int stage)
 {
@@ -226,7 +227,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	int ret;
 
 	overlay->last_flip_req =
-		i915_add_request(dev, NULL, &dev_priv->render_ring);
+		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
@@ -246,11 +247,15 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_gem_request *request;
 
 	BUG_ON(overlay->active);
 
 	overlay->active = 1;
 
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL)
+		return -ENOMEM;
+
 	BEGIN_LP_RING(4);
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	OUT_RING(overlay->flip_addr | OFC_UPDATE);
@@ -258,21 +263,26 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	return intel_overlay_do_wait_request(overlay, true,
+	return intel_overlay_do_wait_request(overlay, request, true,
 					     NEEDS_WAIT_FOR_FLIP);
 }
 
 /* overlay needs to be enabled in OCMD reg */
-static void intel_overlay_continue(struct intel_overlay *overlay,
-				   bool load_polyphase_filter)
+static int intel_overlay_continue(struct intel_overlay *overlay,
+				  bool load_polyphase_filter)
 {
 	struct drm_device *dev = overlay->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 
 	BUG_ON(!overlay->active);
 
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL)
+		return -ENOMEM;
+
 	if (load_polyphase_filter)
 		flip_addr |= OFC_UPDATE;
 
@@ -287,17 +297,23 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
 	ADVANCE_LP_RING();
 
 	overlay->last_flip_req =
-		i915_add_request(dev, NULL, &dev_priv->render_ring);
+		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+	return 0;
 }
 
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
-	u32 flip_addr = overlay->flip_addr;
 	struct drm_device *dev = overlay->dev;
+	u32 flip_addr = overlay->flip_addr;
+	struct drm_i915_gem_request *request;
 
 	BUG_ON(!overlay->active);
 
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL)
+		return -ENOMEM;
+
 	/* According to intel docs the overlay hw may hang (when switching
 	 * off) without loading the filter coeffs. It is however unclear whether
 	 * this applies to the disabling of the overlay or to the switching off
@@ -315,7 +331,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	ADVANCE_LP_RING();
 
-	return intel_overlay_do_wait_request(overlay, true, SWITCH_OFF);
+	return intel_overlay_do_wait_request(overlay, request, true,
+					     SWITCH_OFF);
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -397,13 +414,19 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 		return 0;
 
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+		struct drm_i915_gem_request *request;
+
 		/* synchronous slowpath */
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
 		BEGIN_LP_RING(2);
 		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 
-		ret = intel_overlay_do_wait_request(overlay, true,
+		ret = intel_overlay_do_wait_request(overlay, request, true,
 						    RELEASE_OLD_VID);
 		if (ret)
 			return ret;
@@ -755,7 +778,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
 	intel_overlay_unmap_regs(overlay, regs);
 
-	intel_overlay_continue(overlay, scale_changed);
+	ret = intel_overlay_continue(overlay, scale_changed);
+	if (ret)
+		goto out_unpin;
 
 	overlay->old_vid_bo = overlay->vid_bo;
 	overlay->vid_bo = to_intel_bo(new_bo);