drm/nouveau: reduce usage of fence spinlock to when absolutely necessary
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 047d1d3cae
parent ca6adb8a21
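The patch below makes three related changes. chan->fence.last_sequence_irq becomes an atomic_t, so the NV04 software-method handler that runs from the IRQ path can publish the latest sequence with a plain atomic_set() instead of going through nouveau_fence_handler() under the fence spinlock. nouveau_fence_update() now takes chan->fence.lock itself, and only around the part that actually needs it: the walk of the chan->fence.pending list. And since the IRQ path no longer pushes completions, nouveau_fence_handler() and its prototype are removed. The following is a minimal sketch of the resulting pattern, with made-up demo_* names; it is an illustration of the locking scheme, not code from the patch:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical bookkeeping, shaped like chan->fence in the patch. */
struct demo_fence_state {
        struct list_head pending;       /* unsignalled fences, needs the lock */
        spinlock_t lock;                /* protects only the pending list */
        atomic_t last_sequence_irq;     /* published from the IRQ method handler */
        u32 sequence_ack;               /* last sequence known to have completed */
};

/* IRQ-time producer: publish the sequence and return; no lock is taken. */
static void demo_set_ref(struct demo_fence_state *f, u32 data)
{
        atomic_set(&f->last_sequence_irq, data);
}

/* Consumer: read the published value lock-free, then lock only for the
 * list walk that retires completed fences. */
static void demo_fence_update(struct demo_fence_state *f)
{
        struct list_head *entry, *tmp;
        u32 sequence = atomic_read(&f->last_sequence_irq);

        if (f->sequence_ack == sequence)
                return;
        f->sequence_ack = sequence;

        spin_lock(&f->lock);
        list_for_each_safe(entry, tmp, &f->pending) {
                /* retire entries whose sequence has been acknowledged ... */
        }
        spin_unlock(&f->lock);
}

The 32-bit counter has a single runtime writer (the method handler) and lock-free readers, so an atomic read/write is enough; the spinlock is kept only for the list, which cannot be updated atomically.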
@@ -258,9 +258,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	nouveau_debugfs_channel_fini(chan);
 
 	/* Give outstanding push buffers a chance to complete */
-	spin_lock_irqsave(&chan->fence.lock, flags);
 	nouveau_fence_update(chan);
-	spin_unlock_irqrestore(&chan->fence.lock, flags);
 
 	if (chan->fence.sequence != chan->fence.sequence_ack) {
 		struct nouveau_fence *fence = NULL;
@@ -188,7 +188,7 @@ struct nouveau_channel {
 		struct list_head pending;
 		uint32_t sequence;
 		uint32_t sequence_ack;
-		uint32_t last_sequence_irq;
+		atomic_t last_sequence_irq;
 	} fence;
 
 	/* DMA push buffer */
@@ -1111,7 +1111,6 @@ extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
 extern int nouveau_fence_flush(void *obj, void *arg);
 extern void nouveau_fence_unref(void **obj);
 extern void *nouveau_fence_ref(void *obj);
-extern void nouveau_fence_handler(struct drm_device *dev, int channel);
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -67,12 +67,13 @@ nouveau_fence_update(struct nouveau_channel *chan)
 	if (USE_REFCNT)
 		sequence = nvchan_rd32(chan, 0x48);
 	else
-		sequence = chan->fence.last_sequence_irq;
+		sequence = atomic_read(&chan->fence.last_sequence_irq);
 
 	if (chan->fence.sequence_ack == sequence)
 		return;
 	chan->fence.sequence_ack = sequence;
 
+	spin_lock(&chan->fence.lock);
 	list_for_each_safe(entry, tmp, &chan->fence.pending) {
 		fence = list_entry(entry, struct nouveau_fence, entry);
 
@@ -84,6 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
 		if (sequence == chan->fence.sequence_ack)
 			break;
 	}
+	spin_unlock(&chan->fence.lock);
 }
 
 int
@@ -119,7 +121,6 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 {
 	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
 	struct nouveau_channel *chan = fence->channel;
-	unsigned long flags;
 	int ret;
 
 	ret = RING_SPACE(chan, 2);
@@ -127,9 +128,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 		return ret;
 
 	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
-		spin_lock_irqsave(&chan->fence.lock, flags);
 		nouveau_fence_update(chan);
-		spin_unlock_irqrestore(&chan->fence.lock, flags);
 
 		BUG_ON(chan->fence.sequence ==
 		       chan->fence.sequence_ack - 1);
@@ -138,9 +137,9 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 	fence->sequence = ++chan->fence.sequence;
 
 	kref_get(&fence->refcount);
-	spin_lock_irqsave(&chan->fence.lock, flags);
+	spin_lock(&chan->fence.lock);
 	list_add_tail(&fence->entry, &chan->fence.pending);
-	spin_unlock_irqrestore(&chan->fence.lock, flags);
+	spin_unlock(&chan->fence.lock);
 
 	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
 	OUT_RING(chan, fence->sequence);
@@ -173,14 +172,11 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 	struct nouveau_channel *chan = fence->channel;
-	unsigned long flags;
 
 	if (fence->signalled)
 		return true;
 
-	spin_lock_irqsave(&chan->fence.lock, flags);
 	nouveau_fence_update(chan);
-	spin_unlock_irqrestore(&chan->fence.lock, flags);
 	return fence->signalled;
 }
 
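With the lock handled inside nouveau_fence_update() (see the hunks above), its callers in channel teardown, nouveau_fence_emit() and nouveau_fence_signalled() drop their local flags variables and irqsave/irqrestore pairs and simply call the function. Roughly, a caller now reduces to the following sketch; the helper name is invented for illustration, while the fields and the call are the ones in the diff:

/* Sketch only, modelled on the check in nouveau_channel_free() above.
 * No locking at the call site: the fence spinlock is taken inside
 * nouveau_fence_update() around the chan->fence.pending walk. */
static bool nouveau_channel_fences_done(struct nouveau_channel *chan)
{
        nouveau_fence_update(chan);
        return chan->fence.sequence == chan->fence.sequence_ack;
}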
@@ -221,27 +217,12 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
 	return 0;
 }
 
-void
-nouveau_fence_handler(struct drm_device *dev, int channel)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = NULL;
-
-	if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
-		chan = dev_priv->fifos[channel];
-
-	if (chan) {
-		spin_lock_irq(&chan->fence.lock);
-		nouveau_fence_update(chan);
-		spin_unlock_irq(&chan->fence.lock);
-	}
-}
-
 int
 nouveau_fence_init(struct nouveau_channel *chan)
 {
 	INIT_LIST_HEAD(&chan->fence.pending);
 	spin_lock_init(&chan->fence.lock);
+	atomic_set(&chan->fence.last_sequence_irq, 0);
 	return 0;
 }
 
@@ -527,8 +527,7 @@ static int
 nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
 			int mthd, uint32_t data)
 {
-	chan->fence.last_sequence_irq = data;
-	nouveau_fence_handler(chan->dev, chan->id);
+	atomic_set(&chan->fence.last_sequence_irq, data);
 	return 0;
 }
 