drm/ttm: Hide the implementation details of reservation

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>

commit c75230833c
parent 2844ea3f25
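
The change visible in the hunks below is mechanical: ttm_bo_reserve_nolru() becomes __ttm_bo_reserve(), and callers that used to drop a reservation by open-coding ww_mutex_unlock(&bo->resv->lock) now call __ttm_bo_unreserve(), so the ww_mutex backing a reservation stays a TTM-internal detail. A minimal sketch of the post-patch call pattern follows; try_evict_locked() is a hypothetical caller used only for illustration, not something this commit adds.

#include <drm/ttm/ttm_bo_driver.h>

/*
 * Illustrative only: try-reserve a buffer object without touching the LRU
 * lists, do some work, and drop the reservation again, never dereferencing
 * bo->resv->lock directly.
 */
static int try_evict_locked(struct ttm_buffer_object *bo)
{
        int ret;

        /* no_wait = true: return -EBUSY on contention instead of sleeping. */
        ret = __ttm_bo_reserve(bo, false, true, false, NULL);
        if (unlikely(ret != 0))
                return ret;

        /* ... evict or otherwise tear down the buffer object here ... */

        /* Releases the underlying ww_mutex without re-adding to the LRU. */
        __ttm_bo_unreserve(bo);
        return 0;
}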

--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -349,7 +349,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		qxl_fence_add_release_locked(&qbo->fence, release->id);
 
 		ttm_bo_add_to_lru(bo);
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		entry->reserved = false;
 	}
 	spin_unlock(&bdev->fence_lock);
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+	ret = __ttm_bo_reserve(bo, false, true, false, 0);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -443,7 +443,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			ttm_bo_add_to_lru(bo);
 		}
 
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 	}
 
 	kref_get(&bo->list_kref);
@@ -494,7 +494,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);
 
 		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 			return ret;
 
 		spin_lock(&glob->lru_lock);
-		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, 0);
 
 		/*
 		 * We raced, and lost, someone else holds the reservation now,
@@ -532,7 +532,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		spin_unlock(&bdev->fence_lock);
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		ret = __ttm_bo_reserve(entry, false, true, false, 0);
 		if (remove_all && ret) {
 			spin_unlock(&glob->lru_lock);
-			ret = ttm_bo_reserve_nolru(entry, false, false,
-						   false, 0);
+			ret = __ttm_bo_reserve(entry, false, false,
+					       false, 0);
 			spin_lock(&glob->lru_lock);
 		}
 
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1697,7 +1697,7 @@ out:
 	 * already swapped buffer.
 	 */
 
-	ww_mutex_unlock(&bo->resv->lock);
+	__ttm_bo_unreserve(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
@@ -1731,10 +1731,10 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
 		return -ERESTARTSYS;
 	if (!ww_mutex_is_locked(&bo->resv->lock))
 		goto out_unlock;
-	ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto out_unlock;
-	ww_mutex_unlock(&bo->resv->lock);
+	__ttm_bo_unreserve(bo);
 
 out_unlock:
 	mutex_unlock(&bo->wu_mutex);
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -46,7 +46,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 			ttm_bo_add_to_lru(bo);
 			entry->removed = false;
 		}
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 	}
 }
 
@@ -140,8 +140,8 @@ retry:
 		if (entry->reserved)
 			continue;
 
-		ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
-					   ticket);
+		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+				       ticket);
 
 		if (ret == -EDEADLK) {
 			/* uh oh, we lost out, drop every reservation and try
@@ -224,7 +224,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		ttm_bo_add_to_lru(bo);
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		entry->reserved = false;
 	}
 	spin_unlock(&bdev->fence_lock);
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -788,7 +788,7 @@ extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
 extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
 
 /**
- * ttm_bo_reserve_nolru:
+ * __ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -809,10 +809,10 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
 */
-static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
-				       bool interruptible,
-				       bool no_wait, bool use_ticket,
-				       struct ww_acquire_ctx *ticket)
+static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
+				   bool interruptible,
+				   bool no_wait, bool use_ticket,
+				   struct ww_acquire_ctx *ticket)
 {
 	int ret = 0;
 
@@ -888,8 +888,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 	WARN_ON(!atomic_read(&bo->kref.refcount));
 
-	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
-				   ticket);
+	ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
 	if (likely(ret == 0))
 		ttm_bo_del_sub_from_lru(bo);
 
@@ -929,20 +928,14 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 }
 
 /**
- * ttm_bo_unreserve_ticket
+ * __ttm_bo_unreserve
  * @bo: A pointer to a struct ttm_buffer_object.
- * @ticket: ww_acquire_ctx used for reserving
 *
- * Unreserve a previous reservation of @bo made with @ticket.
+ * Unreserve a previous reservation of @bo where the buffer object is
+ * already on lru lists.
 */
-static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
-					   struct ww_acquire_ctx *t)
+static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bo->glob->lru_lock);
-		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bo->glob->lru_lock);
-	}
 	ww_mutex_unlock(&bo->resv->lock);
 }
 
@@ -955,7 +948,25 @@ static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
 */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	ttm_bo_unreserve_ticket(bo, NULL);
+	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+		spin_lock(&bo->glob->lru_lock);
+		ttm_bo_add_to_lru(bo);
+		spin_unlock(&bo->glob->lru_lock);
+	}
+	__ttm_bo_unreserve(bo);
+}
+
+/**
+ * ttm_bo_unreserve_ticket
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @ticket: ww_acquire_ctx used for reserving
+ *
+ * Unreserve a previous reservation of @bo made with @ticket.
+ */
+static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
+					   struct ww_acquire_ctx *t)
+{
+	ttm_bo_unreserve(bo);
 }
 
 /*