vc4 and nouveau fixes
-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJaawnZAAoJEAx081l5xIa+HNcP/RiVEwwAfSu0MzMosYvf2P53
ekyWyHobwBzuGo5xjG0XvVbexXiad1D/JXCiNjoNPS65O/sbCG4nfh/w1mGlrJwi
QXOnx0CDBwReITbzwMfyJl+gcsHnnKt2jPK5RJUqNy/0fEQKNNhnTsRL/rAzkvDc
8Bc06blwXoCmPMAUsju3htzxCOKS+AgIqYH8qEDcZ+6aOA2f2/LU/hnpYdhl//CE
IOujzKIoJhmNqvndAb7conik+PiKzlq3GEpx966QMZajnu6LKH8iFoFt5M7Jg6cD
vcTEzLGZCIimb5wOqeLq3t9rgS05oScNKRryCxOGB9nTrgwhqAgRUQH3MCUVx+GZ
OybTr3QmS/Oq7a0XjB8LU2M86zR192Kvl5xzmUgT9bhbPdvzR65e6C/I/02+75BY
2FXrn1nGTFXApGPGKjmUo2hyRsSyVudfD6f4JUJX5rlbXiwyv2kfZv/2pCjnLYZt
sAlawMKp+rv628Tx9rPD/dWvMR5Ftrqp55b4eNEZPnsqNMjlEvZjgy+fHJ4VPIII
x9TJYTMlHqjy/tpWWn21qzMbRST1bNB1AaQnLNY10DaRkelEN2lPeNrG2xzC7+YG
8Y/p3Tezmu15dlIx4KUcC+aFUntDS5mdzJuOgnc970DkKTMuPqMMSX433McCNKFG
h3AaLo0IGQh+Dz2z1H0A
=r4NK
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.15-rc10-2' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A fairly urgent nouveau regression fix for broken irqs across
  suspend/resume. This was broken before, but a patch in 4.15 made it
  much more obviously broken, and suspend/resume now fails far more
  often. The fix stops freeing the irq across suspend/resume, which
  should never have been done in the first place.

  Also two vc4 fixes: one for a NULL pointer dereference and one for
  misrendering / flickering on screen."

* tag 'drm-fixes-for-v4.15-rc10-2' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau: Move irq setup/teardown to pci ctor/dtor
  drm/vc4: Fix NULL pointer dereference in vc4_save_hang_state()
  drm/vc4: Flush the caches before the bin jobs, as well.
commit db218549e6
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c:

@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
 	struct nvkm_pci *pci = arg;
 	struct nvkm_device *device = pci->subdev.device;
 	bool handled = false;
+
+	if (pci->irq < 0)
+		return IRQ_HANDLED;
+
 	nvkm_mc_intr_unarm(device);
 	if (pci->msi)
 		pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
 
-	if (pci->irq >= 0) {
-		free_irq(pci->irq, pci);
-		pci->irq = -1;
-	}
-
 	if (pci->agp.bridge)
 		nvkm_agp_fini(pci);
 
@@ -108,8 +107,20 @@ static int
 nvkm_pci_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
-	if (pci_is_pcie(pci->pdev))
-		return nvkm_pcie_oneinit(pci);
+	struct pci_dev *pdev = pci->pdev;
+	int ret;
+
+	if (pci_is_pcie(pci->pdev)) {
+		ret = nvkm_pcie_oneinit(pci);
+		if (ret)
+			return ret;
+	}
+
+	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+	if (ret)
+		return ret;
+
+	pci->irq = pdev->irq;
 	return 0;
 }
 
@@ -117,7 +128,6 @@ static int
 nvkm_pci_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
-	struct pci_dev *pdev = pci->pdev;
 	int ret;
 
 	if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
 	if (pci->func->init)
 		pci->func->init(pci);
 
-	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
-	if (ret)
-		return ret;
-
-	pci->irq = pdev->irq;
-
 	/* Ensure MSI interrupts are armed, for the case where there are
 	 * already interrupts pending (for whatever reason) at load time.
 	 */
 	if (pci->msi)
 		pci->func->msi_rearm(pci);
 
-	return ret;
+	return 0;
 }
 
 static void *
 nvkm_pci_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
+
 	nvkm_agp_dtor(pci);
+
+	if (pci->irq >= 0) {
+		/* free_irq() will call the handler, we use pci->irq == -1
+		 * to signal that it's been torn down and should be a noop.
+		 */
+		int irq = pci->irq;
+		pci->irq = -1;
+		free_irq(irq, pci);
+	}
+
 	if (pci->msi)
 		pci_disable_msi(pci->pdev);
+
 	return nvkm_pci(subdev);
 }
 
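Taken together, the hunks above implement one pattern: request the IRQ once for the lifetime of the device, make the handler a no-op once teardown starts, and free the IRQ only in the destructor, so suspend/resume never touches it. A minimal sketch of that pattern, assuming a hypothetical PCI driver (struct mydev and the function names are illustrative, not nouveau's actual code):

#include <linux/interrupt.h>
#include <linux/pci.h>

struct mydev {
	struct pci_dev *pdev;
	int irq;			/* -1 once torn down */
};

static irqreturn_t mydev_intr(int irq, void *arg)
{
	struct mydev *dev = arg;

	/* On a shared line the handler can still be invoked while
	 * teardown is in flight (CONFIG_DEBUG_SHIRQ even calls it
	 * deliberately from free_irq()).  Bail out once torn down,
	 * claiming the interrupt so the line isn't flagged spurious.
	 */
	if (dev->irq < 0)
		return IRQ_HANDLED;

	/* ... normal interrupt handling ... */
	return IRQ_HANDLED;
}

/* One-time construction: request the IRQ for the device's lifetime. */
static int mydev_ctor(struct mydev *dev)
{
	int ret = request_irq(dev->pdev->irq, mydev_intr, IRQF_SHARED,
			      "mydev", dev);
	if (ret)
		return ret;

	dev->irq = dev->pdev->irq;
	return 0;
}

/* One-time destruction: flag teardown *before* freeing the IRQ. */
static void mydev_dtor(struct mydev *dev)
{
	if (dev->irq >= 0) {
		int irq = dev->irq;

		dev->irq = -1;
		free_irq(irq, dev);
	}
}

The ordering in mydev_dtor() mirrors the patch: the irq field is cleared before free_irq() so the guard at the top of the handler covers any late invocation, and the suspend/resume (init/fini) paths no longer appear at all, which is exactly the point of the fix.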
drivers/gpu/drm/vc4/vc4_gem.c:

@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
 	struct vc4_exec_info *exec[2];
 	struct vc4_bo *bo;
 	unsigned long irqflags;
-	unsigned int i, j, unref_list_count, prev_idx;
+	unsigned int i, j, k, unref_list_count;
 
 	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
 	if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
 		return;
 	}
 
-	prev_idx = 0;
+	k = 0;
 	for (i = 0; i < 2; i++) {
 		if (!exec[i])
 			continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
 			WARN_ON(!refcount_read(&bo->usecnt));
 			refcount_inc(&bo->usecnt);
 			drm_gem_object_get(&exec[i]->bo[j]->base);
-			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
 		}
 
 		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
 			 * because they are naturally unpurgeable.
 			 */
 			drm_gem_object_get(&bo->base.base);
-			kernel_state->bo[j + prev_idx] = &bo->base.base;
-			j++;
+			kernel_state->bo[k++] = &bo->base.base;
 		}
-		prev_idx = j + 1;
 	}
 
+	WARN_ON_ONCE(k != state->bo_count);
+
 	if (exec[0])
 		state->start_bin = exec[0]->ct0ca;
 	if (exec[1])
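The bug this fixes is pure index arithmetic: prev_idx = j + 1 carries a stray "+ 1" across exec jobs, so whenever both jobs contribute buffers one slot of kernel_state->bo[] is skipped (and left NULL, to be dereferenced later) while the final write lands one past the end of the array. A userspace toy demonstrating the hole (not kernel code; the two per-job loops of the real function are collapsed into one here for brevity):

#include <stdio.h>

/* Toy model of the old vs. fixed indexing in vc4_save_hang_state().
 * Two "exec" jobs contribute n[i] buffers each.  The old scheme's
 * prev_idx = j + 1 skips a slot between jobs and overruns the end;
 * the fixed scheme's single write cursor k cannot do either.
 */
int main(void)
{
	int n[2] = { 3, 2 };
	int total = n[0] + n[1];	/* == bo_count in the driver */
	const char *old_bo[8] = { 0 };	/* oversized so the toy is safe */
	const char *new_bo[8] = { 0 };
	int prev_idx = 0, k = 0, high = 0;

	for (int i = 0; i < 2; i++) {
		int j;
		for (j = 0; j < n[i]; j++) {
			old_bo[j + prev_idx] = "bo";	/* old indexing */
			if (j + prev_idx > high)
				high = j + prev_idx;
			new_bo[k++] = "bo";		/* fixed indexing */
		}
		prev_idx = j + 1;			/* the off-by-one */
	}

	for (int s = 0; s < total; s++)
		printf("slot %d: old=%-4s new=%s\n", s,
		       old_bo[s] ? old_bo[s] : "NULL",
		       new_bo[s] ? new_bo[s] : "NULL");
	printf("old scheme's highest index: %d (array size %d)\n",
	       high, total);
	return 0;
}

Running it shows slot 3 left NULL by the old scheme and a highest write index of 5 into a 5-slot array. With the single cursor k every slot is written exactly once, which is precisely what the added WARN_ON_ONCE(k != state->bo_count) asserts.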
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
 		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
 }
 
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	V3D_WRITE(V3D_L2CACTL,
+		  V3D_L2CACTL_L2CCLR);
+
+	V3D_WRITE(V3D_SLCACTL,
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
 /* Sets the registers for the next job to actually be executed in
  * the hardware.
  *
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
 	if (!exec)
 		return;
 
+	/* A previous RCL may have written to one of our textures, and
+	 * our full cache flush at bin time may have occurred before
+	 * that RCL completed.  Flush the texture cache now, but not
+	 * the instructions or uniforms (since we don't write those
+	 * from an RCL).
+	 */
+	vc4_flush_texture_caches(dev);
+
 	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
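Design note on this second vc4 patch: the bin-time vc4_flush_caches() still clears everything (the instruction and uniforms caches as well as the texture caches), while the new render-time vc4_flush_texture_caches() clears only the GPU L2 cache and the T0/T1 slice texture caches. The asymmetry is deliberate, as the comment in the hunk above explains: a render control list can write memory that later jobs sample as textures, but it never writes instructions or uniforms, so the render-time flush does the minimum needed for correctness.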