drm: add unified vma offset manager
If we want to map GPU memory into user-space, we need to linearize the
addresses to not confuse mm-core. Currently, GEM and TTM both implement their
own offset-managers to assign a pgoff to each object for user-space CPU
access. GEM uses a hash-table, TTM uses an rbtree.

This patch provides a unified implementation that can be used to replace both.
TTM allows partial mmaps with a given offset, so we cannot use hash-tables as
the start address may not be known at mmap time. Hence, we use the rbtree
implementation of TTM.

We could easily update drm_mm to use an rbtree instead of a linked list for
its object list and thus drop the rbtree from the vma-manager. However, this
would slow down drm_mm object allocation for all other use-cases (rbtree
insertion) and add another 4-8 bytes to each mm node. Hence, use the separate
tree but allow for later migration.

This is a rewrite of the 2012 proposal by David Airlie <airlied@linux.ie>

v2:
 - fix DocBook integration
 - drop drm_mm_node_linked() and use drm_mm_node_allocated()
 - remove unjustified likely/unlikely usage (but keep for rbtree paths)
 - remove BUG_ON() as drm_mm already does that
 - clarify page-based vs. byte-based addresses
 - use drm_vma_node_reset() for initialization, too

v4:
 - allow external locking via drm_vma_offset_un/lock_lookup()
 - add locked lookup helper drm_vma_offset_lookup_locked()

v5:
 - fix drm_vma_offset_lookup() to correctly validate range-mismatches
   (fix (offset > start + pages))
 - fix drm_vma_offset_exact_lookup() to actually do what it says
 - remove redundant vm_pages member (add drm_vma_node_size() helper)
 - remove unneeded goto
 - fix documentation

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@gmail.com>
parent 85d9cb41db
commit fe3078fa5c
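Editor's note: the API introduced by this commit is small, and a short usage
sketch makes the flow clearer before reading the diff. The sketch below is not
part of the commit; struct my_bo, struct my_dev and the helper names are
hypothetical, only the drm_vma_* calls come from the patch itself.

/*
 * Hedged usage sketch (not part of this commit): a hypothetical driver
 * buffer object embeds a drm_vma_offset_node, allocates a fake mmap offset
 * from the device-wide manager, and reports the byte-based offset that
 * user-space later passes to mmap() on the DRM fd.
 */
#include <drm/drm_vma_manager.h>
#include <linux/types.h>

struct my_bo {                                  /* hypothetical driver object */
        struct drm_vma_offset_node vma_node;    /* fake-offset bookkeeping */
        unsigned long num_pages;                /* object size in pages */
};

struct my_dev {                                 /* hypothetical device */
        struct drm_vma_offset_manager vma_manager;
};

static int my_bo_map_offset(struct my_dev *dev, struct my_bo *bo, __u64 *offset)
{
        int ret;

        /* idempotent: returns 0 if the node already has an offset */
        ret = drm_vma_offset_add(&dev->vma_manager, &bo->vma_node,
                                 bo->num_pages);
        if (ret)
                return ret;

        /* byte-based offset for user-space mmap() */
        *offset = drm_vma_node_offset_addr(&bo->vma_node);
        return 0;
}

static void my_bo_release(struct my_dev *dev, struct my_bo *bo)
{
        /* safe even if no offset was ever allocated */
        drm_vma_offset_remove(&dev->vma_manager, &bo->vma_node);
}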
Documentation/DocBook/drm.tmpl
@@ -2212,6 +2212,12 @@ void intel_crt_init(struct drm_device *dev)
 !Iinclude/drm/drm_rect.h
 !Edrivers/gpu/drm/drm_rect.c
     </sect2>
+    <sect2>
+      <title>VMA Offset Manager</title>
+!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
+!Edrivers/gpu/drm/drm_vma_manager.c
+!Iinclude/drm/drm_vma_manager.h
+    </sect2>
   </sect1>
 
   <!-- Internals: kms properties -->
drivers/gpu/drm/Makefile
@@ -13,7 +13,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
 		drm_trace_points.o drm_global.o drm_prime.o \
-		drm_rect.o
+		drm_rect.o drm_vma_manager.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drivers/gpu/drm/drm_vma_manager.c (new file, 281 lines)
@@ -0,0 +1,281 @@
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible to map arbitrary driver-dependent memory
 * regions into the linear user address-space. It provides offsets to the
 * caller which can then be used on the address_space of the drm-device. It
 * takes care to not overlap regions, size them appropriately and to not
 * confuse mm-core by inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
 * no longer be linear. Please use VM_NONLINEAR in that case and implement your
 * own offset managers.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 */

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
                                 unsigned long page_offset, unsigned long size)
{
        rwlock_init(&mgr->vm_lock);
        mgr->vm_addr_space_rb = RB_ROOT;
        drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
        /* take the lock to protect against buggy drivers */
        write_lock(&mgr->vm_lock);
        drm_mm_takedown(&mgr->vm_addr_space_mm);
        write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
                                                  unsigned long start,
                                                  unsigned long pages)
{
        struct drm_vma_offset_node *node;

        read_lock(&mgr->vm_lock);
        node = drm_vma_offset_lookup_locked(mgr, start, pages);
        read_unlock(&mgr->vm_lock);

        return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
 * manually. See drm_vma_offset_lock_lookup() for an example.
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
                                                          unsigned long start,
                                                          unsigned long pages)
{
        struct drm_vma_offset_node *node, *best;
        struct rb_node *iter;
        unsigned long offset;

        iter = mgr->vm_addr_space_rb.rb_node;
        best = NULL;

        while (likely(iter)) {
                node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
                offset = node->vm_node.start;
                if (start >= offset) {
                        iter = iter->rb_right;
                        best = node;
                        if (start == offset)
                                break;
                } else {
                        iter = iter->rb_left;
                }
        }

        /* verify that the node spans the requested area */
        if (best) {
                offset = best->vm_node.start + best->vm_node.size;
                if (offset < start + pages)
                        best = NULL;
        }

        return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
                                   struct drm_vma_offset_node *node)
{
        struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
        struct rb_node *parent = NULL;
        struct drm_vma_offset_node *iter_node;

        while (likely(*iter)) {
                parent = *iter;
                iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

                if (node->vm_node.start < iter_node->vm_node.start)
                        iter = &(*iter)->rb_left;
                else if (node->vm_node.start > iter_node->vm_node.start)
                        iter = &(*iter)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&node->vm_rb, parent, iter);
        rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and return 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or call
 * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
 * case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
                       struct drm_vma_offset_node *node, unsigned long pages)
{
        int ret;

        write_lock(&mgr->vm_lock);

        if (drm_mm_node_allocated(&node->vm_node)) {
                ret = 0;
                goto out_unlock;
        }

        ret = drm_mm_insert_node_generic(&mgr->vm_addr_space_mm,
                                         &node->vm_node, pages, 0, 0);
        if (ret)
                goto out_unlock;

        _drm_vma_offset_add_rb(mgr, node);

out_unlock:
        write_unlock(&mgr->vm_lock);
        return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                           struct drm_vma_offset_node *node)
{
        write_lock(&mgr->vm_lock);

        if (drm_mm_node_allocated(&node->vm_node)) {
                rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
                drm_mm_remove_node(&node->vm_node);
                memset(&node->vm_node, 0, sizeof(node->vm_node));
        }

        write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
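Editor's note: the lookups above are page-based, which matches what mm-core
hands a driver in vma->vm_pgoff. The following hedged sketch (not part of the
commit) shows how a driver's mmap() path might resolve that offset back to its
object; it reuses the hypothetical struct my_bo / struct my_dev from the
earlier sketch, and vma_pages() is the standard helper from <linux/mm.h>.

/*
 * Hedged sketch (not part of this commit): resolving a user-space mmap()
 * request back to the driver object it targets. vma->vm_pgoff is already
 * page-based, which is exactly what the lookup helpers expect.
 */
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>

static int my_dev_mmap(struct my_dev *dev, struct vm_area_struct *vma)
{
        struct drm_vma_offset_node *node;
        struct my_bo *bo;

        /* best-match lookup: the mapping may start inside the object */
        node = drm_vma_offset_lookup(&dev->vma_manager, vma->vm_pgoff,
                                     vma_pages(vma));
        if (!node)
                return -EINVAL;

        bo = container_of(node, struct my_bo, vma_node);
        /* ... set up vma->vm_ops and insert the pages backing @bo ... */
        return 0;
}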
include/drm/drm_vma_manager.h (new file, 202 lines)
@@ -0,0 +1,202 @@
#ifndef __DRM_VMA_MANAGER_H__
#define __DRM_VMA_MANAGER_H__

/*
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct drm_vma_offset_node {
        struct drm_mm_node vm_node;
        struct rb_node vm_rb;
};

struct drm_vma_offset_manager {
        rwlock_t vm_lock;
        struct rb_root vm_addr_space_rb;
        struct drm_mm vm_addr_space_mm;
};

void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
                                 unsigned long page_offset, unsigned long size);
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);

struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
                                                  unsigned long start,
                                                  unsigned long pages);
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
                                                          unsigned long start,
                                                          unsigned long pages);
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
                       struct drm_vma_offset_node *node, unsigned long pages);
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
                           struct drm_vma_offset_node *node);

/**
 * drm_vma_offset_exact_lookup() - Look up node by exact address
 * @mgr: Manager object
 * @start: Start address (page-based, not byte-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
 * It only returns the exact object with the given start address.
 *
 * RETURNS:
 * Node at exact start address @start.
 */
static inline struct drm_vma_offset_node *
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
                            unsigned long start,
                            unsigned long pages)
{
        struct drm_vma_offset_node *node;

        node = drm_vma_offset_lookup(mgr, start, pages);
        return (node && node->vm_node.start == start) ? node : NULL;
}

/**
 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
 * @mgr: Manager object
 *
 * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
 * are allowed while holding this lock. All other contexts are blocked from VMA
 * until the lock is released via drm_vma_offset_unlock_lookup().
 *
 * Use this if you need to take a reference to the objects returned by
 * drm_vma_offset_lookup_locked() before releasing this lock again.
 *
 * This lock must not be used for anything else than extended lookups. You must
 * not call any other VMA helpers while holding this lock.
 *
 * Note: You're in atomic-context while holding this lock!
 *
 * Example:
 *   drm_vma_offset_lock_lookup(mgr);
 *   node = drm_vma_offset_lookup_locked(mgr);
 *   if (node)
 *       kref_get_unless_zero(container_of(node, sth, entr));
 *   drm_vma_offset_unlock_lookup(mgr);
 */
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{
        read_lock(&mgr->vm_lock);
}

/**
 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
 * @mgr: Manager object
 *
 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
 */
static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
{
        read_unlock(&mgr->vm_lock);
}

/**
 * drm_vma_node_reset() - Initialize or reset node object
 * @node: Node to initialize or reset
 *
 * Reset a node to its initial state. This must be called if @node isn't
 * already cleared (eg., via kzalloc) before using it with any VMA offset
 * manager.
 *
 * This must not be called on an already allocated node, or you will leak
 * memory.
 */
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
{
        memset(node, 0, sizeof(*node));
}

/**
 * drm_vma_node_start() - Return start address for page-based addressing
 * @node: Node to inspect
 *
 * Return the start address of the given node. This can be used as offset into
 * the linear VM space that is provided by the VMA offset manager. Note that
 * this can only be used for page-based addressing. If you need a proper offset
 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
 * drm_vma_node_offset_addr() helper instead.
 *
 * RETURNS:
 * Start address of @node for page-based addressing. 0 if the node does not
 * have an offset allocated.
 */
static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
{
        return node->vm_node.start;
}

/**
 * drm_vma_node_size() - Return size (page-based)
 * @node: Node to inspect
 *
 * Return the size as number of pages for the given node. This is the same size
 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
 * node, this is 0.
 *
 * RETURNS:
 * Size of @node as number of pages. 0 if the node does not have an offset
 * allocated.
 */
static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
{
        return node->vm_node.size;
}

/**
 * drm_vma_node_has_offset() - Check whether node is added to offset manager
 * @node: Node to be checked
 *
 * RETURNS:
 * true iff the node was previously allocated an offset and added to
 * an vma offset manager.
 */
static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
{
        return drm_mm_node_allocated(&node->vm_node);
}

/**
 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
 * @node: Linked offset node
 *
 * Same as drm_vma_node_start() but returns the address as a valid offset that
 * can be used for user-space mappings during mmap().
 * This must not be called on unlinked nodes.
 *
 * RETURNS:
 * Offset of @node for byte-based addressing. 0 if the node does not have an
 * object allocated.
 */
static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
{
        return ((__u64)node->vm_node.start) << PAGE_SHIFT;
}

#endif /* __DRM_VMA_MANAGER_H__ */
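Editor's note: the kernel-doc example for drm_vma_offset_lock_lookup() above is
schematic. A slightly fleshed-out, hypothetical version of the same pattern
might look like the sketch below; struct my_bo, its "refcount" kref member and
my_bo_lookup_and_ref() are assumptions, not part of the patch.

/*
 * Hedged sketch (not part of this commit): take a reference to the object
 * found by a lookup before the read-lock is dropped, so the node cannot be
 * destroyed between lookup and use.
 */
#include <drm/drm_vma_manager.h>
#include <linux/kref.h>

static struct my_bo *my_bo_lookup_and_ref(struct drm_vma_offset_manager *mgr,
                                           unsigned long start,
                                           unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct my_bo *bo = NULL;

        drm_vma_offset_lock_lookup(mgr);        /* atomic context from here on */
        node = drm_vma_offset_lookup_locked(mgr, start, pages);
        if (node) {
                bo = container_of(node, struct my_bo, vma_node);
                if (!kref_get_unless_zero(&bo->refcount))
                        bo = NULL;              /* object is being torn down */
        }
        drm_vma_offset_unlock_lookup(mgr);

        return bo;
}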