amdgpu, nouveau, i915 and exynos fixes
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJZ6Y+3AAoJEAx081l5xIa+dEIP/j5tjawx4ada7etPbAblFjmD
cs0LUFM6SgamUtgi5slJn+3jV1bXJpgcmCXI0S6Ln7xjLLvjcTnauJFidrBgT1lG
dJOrzz8zgBF2jpc0oC/a3ercum3vncqxAbUbDQ6RwDrxuhwBB3L31ix263z/+Yr8
fy655TSpxTiaokiVxws2mOfpte9yWcSeQUChrzRjNSS480O4qd/AJZl+FhiBNySw
2kAW9zbTFEV/9qfANHToAeUsCOo2VoTBXqgpvPiUKJG2gow8yT0Wf5Al4HcagZCt
9JtIbNlHxnuSYR0CCfx4r+/igWQt52O2GgmlPkaxvGjVz13n9m9RngYyyZksZWjQ
9wBJEQI4pvCUIBHpic+90b9AKTIub52bImigNzckEDvvXXEIO4SUc23s6pkOKXD8
V4ZO+pHtzV6/T9vGpntrbODcwp7/h397Zr6SKP+YjRl12Gh5/922xz3bQDnEgYF/
o/q/stEm5Jqncz4WR+wZ2a7gocPxuexv6T/hZwvT+08l/QR5DR0sUhlJ+j3QaSqk
ELqn5tNEGQiOEDLHmBnjiHZJFYAAlbOkPoVPIMuFt/xRZtDb2ZCN457OgSubmPqw
1i00XLd6ZoEVI+nBcGJbyQhqZmcTftWXXvcCixBqvK9mEYFqQoe222bytaIoiPpo
WT3Zt2E6jgoQxtX3PftU
=wFHG
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.14-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Standard fixes pull for rc6: one regression fix for amdgpu, a bunch
  of nouveau fixes that I'd missed a pull req for from Ben last week,
  some exynos regression fixes, and a few fixes for i915"

* tag 'drm-fixes-for-v4.14-rc6' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau/fbcon: fix oops without fbdev emulation
  Revert "drm/amdgpu: discard commands of killed processes"
  drm/i915: Use a mask when applying WaProgramL3SqcReg1Default
  drm/i915: Report -EFAULT before pwrite fast path into shmemfs
  drm/i915/cnl: Fix PLL initialization for HDMI.
  drm/i915/cnl: Fix PLL mapping.
  drm/i915: Use bdw_ddi_translations_fdi for Broadwell
  drm/i915: Fix eviction when the GGTT is idle but full
  drm/i915/gvt: Fix GPU hang after reusing vGPU instance across different guest OS
  drm/exynos: Clear drvdata after component unbind
  drm/exynos: Fix potential NULL pointer dereference in suspend/resume paths
  drm/nouveau/kms/nv50: fix oops during DP IRQ handling on non-MST boards
  drm/nouveau/bsp/g92: disable by default
  drm/nouveau/mmu: flush tlbs before deleting page tables
commit d92116a089
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity)
 {
 	struct amd_sched_rq *rq = entity->rq;
-	int r;
 
 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return;
 
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
-	 * queued IBs or discard them on SIGKILL
+	 * queued IBs
 	 */
-	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-		r = -ERESTARTSYS;
-	else
-		r = wait_event_killable(sched->job_scheduled,
-					amd_sched_entity_is_idle(entity));
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
 	amd_sched_rq_remove_entity(rq, entity);
-	if (r) {
-		struct amd_sched_job *job;
-
-		/* Park the kernel for a moment to make sure it isn't processing
-		 * our enity.
-		 */
-		kthread_park(sched->thread);
-		kthread_unpark(sched->thread);
-		while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
-			sched->ops->free_job(job);
-
-	}
+
 	kfifo_free(&entity->job_queue);
 }
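The revert above restores an unconditional wait: entity teardown now blocks until the scheduler has consumed every queued IB, instead of bailing out early for SIGKILLed processes and draining the FIFO by hand. A minimal userspace analogue of that drain-to-idle pattern, using pthreads and hypothetical names (not the amdgpu API):

/* Sketch only: wait_event() analogue with no killable/interruptible exit. */
#include <pthread.h>
#include <stdio.h>

struct entity {
	pthread_mutex_t lock;
	pthread_cond_t job_scheduled;	/* signalled when a queued job completes */
	int queued;			/* jobs still sitting in the FIFO */
};

static void entity_fini(struct entity *e)
{
	pthread_mutex_lock(&e->lock);
	while (e->queued > 0)		/* no early exit path, by design */
		pthread_cond_wait(&e->job_scheduled, &e->lock);
	pthread_mutex_unlock(&e->lock);
	/* FIFO now provably empty; safe to free its backing storage */
}

int main(void)
{
	struct entity e = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.job_scheduled = PTHREAD_COND_INITIALIZER,
		.queued = 0,		/* already idle, so fini returns at once */
	};

	entity_fini(&e);
	puts("entity torn down");
	return 0;
}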
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct exynos_drm_private *private = drm_dev->dev_private;
+	struct exynos_drm_private *private;
 
 	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;
 
+	private = drm_dev->dev_private;
+
 	drm_kms_helper_poll_disable(drm_dev);
 	exynos_drm_fbdev_suspend(drm_dev);
 	private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
 static int exynos_drm_resume(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct exynos_drm_private *private = drm_dev->dev_private;
+	struct exynos_drm_private *private;
 
 	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;
 
+	private = drm_dev->dev_private;
 	drm_atomic_helper_resume(drm_dev, private->suspend_state);
 	exynos_drm_fbdev_resume(drm_dev);
 	drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
 
 	kfree(drm->dev_private);
 	drm->dev_private = NULL;
+	dev_set_drvdata(dev, NULL);
 
 	drm_dev_unref(drm);
 }
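Both exynos fixes close the same window: once the component is unbound, dev_get_drvdata() can legitimately return NULL, so the private pointer must only be fetched after the NULL check, and unbind must clear drvdata so a later suspend/resume actually sees that NULL. A standalone sketch of the before/after ordering, with illustrative stand-in types rather than the real DRM structs:

#include <stdio.h>
#include <stddef.h>

struct drm_device { void *dev_private; };
struct device { struct drm_device *drvdata; };	/* dev_get_drvdata() stand-in */

/* Old shape: the dereference happens before the NULL check. */
static int suspend_buggy(struct device *dev)
{
	struct drm_device *drm_dev = dev->drvdata;
	void *private = drm_dev->dev_private;	/* crashes once unbind cleared drvdata */

	if (!drm_dev)
		return 0;			/* too late to help */
	return private != NULL;
}

/* Fixed shape: fetch dev_private only after drm_dev is known valid. */
static int suspend_fixed(struct device *dev)
{
	struct drm_device *drm_dev = dev->drvdata;
	void *private;

	if (!drm_dev)
		return 0;

	private = drm_dev->dev_private;
	return private != NULL;
}

int main(void)
{
	struct device unbound = { .drvdata = NULL };	/* after dev_set_drvdata(dev, NULL) */

	printf("fixed path on unbound device: %d\n", suspend_fixed(&unbound));
	(void)suspend_buggy;	/* calling it with `unbound` would segfault */
	return 0;
}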
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-	int ring_id;
-
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
-
-	spin_lock_bh(&scheduler->mmio_context_lock);
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-		if (scheduler->engine_owner[ring_id] == vgpu) {
-			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-			scheduler->engine_owner[ring_id] = NULL;
-		}
-	}
-	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
+	int ring_id;
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->need_reschedule = true;
 		scheduler->current_vgpu = NULL;
 	}
+
+	spin_lock_bh(&scheduler->mmio_context_lock);
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		if (scheduler->engine_owner[ring_id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+	}
+	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
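The GVT fix moves the engine-owner release from vGPU destruction, which runs too late to stop a reused instance from inheriting a stale MMIO context, into intel_vgpu_stop_schedule(). The core pattern is a lock-protected sweep that hands every engine still owned by the departing vGPU back to the host; a userspace sketch with hypothetical types:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

#define NUM_ENGINES 5

struct vgpu { int id; };

struct scheduler {
	pthread_mutex_t mmio_context_lock;
	struct vgpu *engine_owner[NUM_ENGINES];
};

/* intel_gvt_switch_mmio(vgpu, NULL, ring) stand-in: restore host MMIO
 * state so nothing of the departing vGPU survives on this engine. */
static void switch_mmio_to_host(struct scheduler *s, int ring)
{
	s->engine_owner[ring] = NULL;
}

static void stop_schedule(struct scheduler *s, struct vgpu *vgpu)
{
	pthread_mutex_lock(&s->mmio_context_lock);
	for (int ring = 0; ring < NUM_ENGINES; ring++)
		if (s->engine_owner[ring] == vgpu)
			switch_mmio_to_host(s, ring);
	pthread_mutex_unlock(&s->mmio_context_lock);
}

int main(void)
{
	struct vgpu v = { .id = 1 };
	struct scheduler s = { .mmio_context_lock = PTHREAD_MUTEX_INITIALIZER };

	s.engine_owner[2] = &v;			/* vGPU still owns one engine */
	stop_schedule(&s, &v);
	printf("engine 2 owner: %p\n", (void *)s.engine_owner[2]);
	return 0;
}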
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	if (READ_ONCE(obj->mm.pages))
 		return -ENODEV;
 
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
 	/* Before the pages are instantiated the object is treated as being
 	 * in the CPU domain. The pages will be clflushed as required before
 	 * use, and we can freely write into the pages directly. If userspace
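The new check rejects objects whose backing store may not be populated (madv != WILLNEED) with -EFAULT before the GTT fast path runs, so the caller can fall back to the shmemfs write path instead of faulting later. A toy model of that errno-driven dispatch, with hypothetical helper names:

#include <errno.h>
#include <stdio.h>
#include <stdbool.h>

/* Fast path: refuses work it cannot safely do. */
static int pwrite_gtt(bool willneed)
{
	if (!willneed)
		return -EFAULT;	/* distinctive error: "try another path" */
	return 0;
}

/* Slow path: always possible, just slower. */
static int pwrite_shmem(void)
{
	return 0;
}

static int do_pwrite(bool willneed)
{
	int ret = pwrite_gtt(willneed);

	if (ret == -EFAULT)
		ret = pwrite_shmem();	/* fall back instead of failing */
	return ret;
}

int main(void)
{
	printf("pwrite on DONTNEED object: %d\n", do_pwrite(false));
	return 0;
}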
@@ -33,21 +33,20 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv, id) {
-		struct intel_timeline *tl;
+	if (i915->gt.active_requests)
+		return false;
 
-		tl = &ggtt->base.timeline.engine[engine->id];
-		if (i915_gem_active_isset(&tl->last_request))
-			return false;
-	}
+	for_each_engine(engine, i915, id) {
+		if (engine->last_retired_context != i915->kernel_context)
+			return false;
+	}
 
-	return true;
+	return true;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, cache_level,
 				    start, end, mode);
 
-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
@@ -182,7 +182,8 @@ search_again:
 		BUG_ON(ret);
 	}
 
-	/* Can we unpin some objects such as idle hw contents,
+	/*
+	 * Can we unpin some objects such as idle hw contents,
 	 * or pending flips? But since only the GGTT has global entries
 	 * such as scanouts, rinbuffers and contexts, we can skip the
 	 * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
 	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
-	if (ggtt_is_idle(dev_priv)) {
-		/* If we still have pending pageflip completions, drop
-		 * back to userspace to give our workqueues time to
-		 * acquire our locks and unpin the old scanouts.
-		 */
-		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
+	/*
+	 * Not everything in the GGTT is tracked via VMA using
+	 * i915_vma_move_to_active(), otherwise we could evict as required
+	 * with minimal stalling. Instead we are forced to idle the GPU and
+	 * explicitly retire outstanding requests which will then remove
+	 * the pinning for active objects such as contexts and ring,
+	 * enabling us to evict them on the next iteration.
+	 *
+	 * To ensure that all user contexts are evictable, we perform
+	 * a switch to the perma-pinned kernel context. This all also gives
+	 * us a termination condition, when the last retired context is
+	 * the kernel's there is no more we can evict.
+	 */
+	if (!ggtt_is_idle(dev_priv)) {
+		ret = ggtt_flush(dev_priv);
+		if (ret)
+			return ret;
+
+		goto search_again;
 	}
 
-	ret = ggtt_flush(dev_priv);
-	if (ret)
-		return ret;
-
-	goto search_again;
+	/*
+	 * If we still have pending pageflip completions, drop
+	 * back to userspace to give our workqueues time to
+	 * acquire our locks and unpin the old scanouts.
+	 */
+	return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
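The comment block added above carries the gist of the eviction fix: when the GGTT is full but not yet idle, flush (switch to the perma-pinned kernel context and retire) and search again, and only report ENOSPC or EAGAIN once ggtt_is_idle() holds. A condensed, runnable userspace model of that control flow, with stubbed hypothetical helpers:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int flushes;
static bool try_evict(void) { return false; }	     /* lists stay pinned here */
static bool ggtt_is_idle(void) { return flushes > 0; }
static int ggtt_flush(void) { flushes++; return 0; } /* kernel ctx + retire */
static bool pending_fb_unpin(void) { return false; }

static int evict_something(void)
{
search_again:
	if (try_evict())
		return 0;

	if (!ggtt_is_idle()) {
		int ret = ggtt_flush();
		if (ret)
			return ret;

		goto search_again;	/* the flush may have unpinned contexts */
	}

	/* idle but still full: give up, or let userspace retry on EAGAIN */
	return pending_fb_unpin() ? -EAGAIN : -ENOSPC;
}

int main(void)
{
	printf("evict_something() = %d\n", evict_something());
	return 0;
}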
@@ -6998,6 +6998,7 @@ enum {
  */
 #define L3_GENERAL_PRIO_CREDITS(x)	(((x) >> 1) << 19)
 #define L3_HIGH_PRIO_CREDITS(x)		(((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK		((0x1f << 19) | (0x1f << 14))
 
 #define GEN7_L3CNTLREG1				_MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL	0x3C47FF8C
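The new mask covers both 5-bit credit fields (bits 19-23 and 14-18; credits are programmed in units of two, hence the >> 1). It exists so the two users later in this series can do a read-modify-write that clears only the credit fields instead of blindly overwriting the whole register. A standalone demonstration of the pattern:

#include <stdint.h>
#include <stdio.h>

#define L3_GENERAL_PRIO_CREDITS(x)	(((x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x)		(((x) >> 1) << 14)
#define L3_PRIO_CREDITS_MASK		((0x1f << 19) | (0x1f << 14))

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* pretend hardware default value */

	reg &= ~L3_PRIO_CREDITS_MASK;	/* clear only the two credit fields */
	reg |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);

	/* every bit outside the credit fields is preserved */
	printf("GEN8_L3SQCREG1 = 0x%08x\n", (unsigned)reg);
	return 0;
}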
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
 			    int *n_entries)
 {
 	if (IS_BROADWELL(dev_priv)) {
-		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
-		return hsw_ddi_translations_fdi;
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+		return bdw_ddi_translations_fdi;
 	} else if (IS_HASWELL(dev_priv)) {
 		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
 		return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 		 * register writes.
 		 */
 		val = I915_READ(DPCLKA_CFGCR0);
-		val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
-			 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
 		I915_WRITE(DPCLKA_CFGCR0, val);
 	} else if (IS_GEN9_BC(dev_priv)) {
 		/* DDI -> PLL mapping  */
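The second hunk is the CNL PLL-mapping fix: the old code cleared the just-programmed DDI clock-select field together with the clock-off bit, wiping the DDI-to-PLL mapping it had written moments earlier. A tiny demo of why the narrower mask matters; the bit layout here is illustrative, not the real register:

#include <stdint.h>
#include <stdio.h>

#define CLK_SEL_MASK(port)	(0x3u << (2 * (port)))
#define CLK_SEL(pll, port)	((uint32_t)(pll) << (2 * (port)))
#define CLK_OFF(port)		(1u << (16 + (port)))

int main(void)
{
	int port = 1, pll = 2;
	/* mapping just programmed, clock still gated off */
	uint32_t val = CLK_SEL(pll, port) | CLK_OFF(port);

	/* buggy: clears the mapping along with the off bit */
	uint32_t buggy = val & ~(CLK_OFF(port) | CLK_SEL_MASK(port));
	/* fixed: ungate the clock, leave the mapping alone */
	uint32_t fixed = val & ~CLK_OFF(port);

	printf("buggy=0x%08x fixed=0x%08x\n", (unsigned)buggy, (unsigned)fixed);
	return 0;
}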
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
 	/* 3. Configure DPLL_CFGCR0 */
 	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
-	if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
 		val = pll->state.hw_state.cfgcr1;
 		I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
 		/* 4. Reab back to ensure writes completed */
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
-	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
-		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
-					   L3_HIGH_PRIO_CREDITS(2));
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+		u32 val = I915_READ(GEN8_L3SQCREG1);
+		val &= ~L3_PRIO_CREDITS_MASK;
+		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+		I915_WRITE(GEN8_L3SQCREG1, val);
+	}
 
 	/* WaToEnableHwFixForPushConstHWBug:bxt */
 	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 				   int high_prio_credits)
 {
 	u32 misccpctl;
+	u32 val;
 
 	/* WaTempDisableDOPClkGating:bdw */
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 
-	I915_WRITE(GEN8_L3SQCREG1,
-		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
-		   L3_HIGH_PRIO_CREDITS(high_prio_credits));
+	val = I915_READ(GEN8_L3SQCREG1);
+	val &= ~L3_PRIO_CREDITS_MASK;
+	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
+	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
+	I915_WRITE(GEN8_L3SQCREG1, val);
 
 	/*
 	 * Wait at least 100 clocks before re-enabling clock gating.
@@ -223,7 +223,7 @@ void
 nouveau_fbcon_accel_save_disable(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (drm->fbcon) {
+	if (drm->fbcon && drm->fbcon->helper.fbdev) {
 		drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
 		drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
 	}
@@ -233,7 +233,7 @@ void
 nouveau_fbcon_accel_restore(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (drm->fbcon) {
+	if (drm->fbcon && drm->fbcon->helper.fbdev) {
 		drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
 	}
 }
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
 	struct nouveau_fbdev *fbcon = drm->fbcon;
 	if (fbcon && drm->channel) {
 		console_lock();
-		fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+		if (fbcon->helper.fbdev)
+			fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
 		console_unlock();
 		nouveau_channel_idle(drm->channel);
 		nvif_object_fini(&fbcon->twod);
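All three nouveau_fbcon hunks guard the same dereference: with fbdev emulation disabled, drm->fbcon can exist while helper.fbdev was never allocated, so the inner pointer must be checked too. A self-contained illustration with stand-in types, not the real DRM structs:

#include <stddef.h>
#include <stdio.h>

struct fb_info { unsigned flags; };
struct helper { struct fb_info *fbdev; };
struct fbcon { struct helper helper; unsigned saved_flags; };

static void accel_save_disable(struct fbcon *fbcon)
{
	if (fbcon && fbcon->helper.fbdev) {	/* both levels checked */
		fbcon->saved_flags = fbcon->helper.fbdev->flags;
		fbcon->helper.fbdev->flags |= 1u;	/* HWACCEL_DISABLED stand-in */
	}
}

int main(void)
{
	/* fbdev emulation off: the outer object exists, the inner one doesn't */
	struct fbcon no_emulation = { .helper = { .fbdev = NULL } };

	accel_save_disable(&no_emulation);	/* no oops: guard short-circuits */
	puts("ok");
	return 0;
}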
@@ -3265,11 +3265,14 @@ nv50_mstm = {
 void
 nv50_mstm_service(struct nv50_mstm *mstm)
 {
-	struct drm_dp_aux *aux = mstm->mgr.aux;
+	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
 	bool handled = true;
 	int ret;
 	u8 esi[8] = {};
 
+	if (!aux)
+		return;
+
 	while (handled) {
 		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
 		if (ret != 8) {
@@ -39,5 +39,5 @@ int
 g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
 	return nvkm_xtensa_new_(&g84_bsp, device, index,
-				true, 0x103000, pengine);
+				device->chipset != 0x92, 0x103000, pengine);
 }
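nvkm_xtensa_new_() takes an enable flag as its fourth argument, so computing it as device->chipset != 0x92 leaves the BSP engine present but disabled by default on G92 boards. A sketch of that constructor-flag idiom, with illustrative names rather than the nvkm API:

#include <stdbool.h>
#include <stdio.h>

struct device { int chipset; };

static int engine_new(bool enable)
{
	if (!enable) {
		puts("bsp: disabled by default on this chipset");
		return 0;	/* object still created, engine left off */
	}
	puts("bsp: enabled");
	return 0;
}

int main(void)
{
	struct device g84 = { .chipset = 0x84 }, g92 = { .chipset = 0x92 };

	engine_new(g84.chipset != 0x92);	/* enabled */
	engine_new(g92.chipset != 0x92);	/* disabled */
	return 0;
}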
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 		}
 
+		mmu->func->flush(vm);
+
 		nvkm_memory_del(&pgt);
 	}
 }
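Ordering is the whole fix here: the TLBs must be flushed while the page-table memory is still valid, otherwise the GPU can keep walking freed (and possibly reused) pages. A single-threaded userspace model of the two orderings, purely illustrative:

#include <stdio.h>
#include <stdlib.h>

struct pgt { int entries[512]; };

static struct pgt *tlb_cached;	/* stand-in for a cached TLB translation */

static void tlb_flush(void)
{
	tlb_cached = NULL;
}

static void unmap_pgt(struct pgt *pgt)
{
	tlb_flush();	/* fixed order: drop translations while memory is valid */
	free(pgt);	/* only now is the table memory handed back */
	/* the old order freed first, leaving tlb_cached pointing at freed
	 * memory until the flush finally happened */
}

int main(void)
{
	struct pgt *pgt = calloc(1, sizeof(*pgt));

	tlb_cached = pgt;
	unmap_pgt(pgt);
	printf("stale translation after teardown: %p\n", (void *)tlb_cached);
	return 0;
}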