Diffstat (limited to 'include/drm/ttm')
-rw-r--r--  include/drm/ttm/ttm_bo_api.h        77
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h    270
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h  18
-rw-r--r--  include/drm/ttm/ttm_lock.h           2
-rw-r--r--  include/drm/ttm/ttm_memory.h         3
-rw-r--r--  include/drm/ttm/ttm_object.h        89
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h    17
-rw-r--r--  include/drm/ttm/ttm_placement.h      3
8 files changed, 303 insertions, 176 deletions
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 974c8f801c3..7526c5bf561 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -31,14 +31,15 @@
#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_
-#include "drm_hashtab.h"
+#include <drm/drm_hashtab.h>
+#include <drm/drm_vma_manager.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mm.h>
-#include <linux/rbtree.h>
#include <linux/bitmap.h>
+#include <linux/reservation.h>
struct ttm_bo_device;
@@ -124,11 +125,15 @@ struct ttm_mem_reg {
*
* @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
* but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
*/
enum ttm_bo_type {
ttm_bo_type_device,
- ttm_bo_type_kernel
+ ttm_bo_type_kernel,
+ ttm_bo_type_sg
};
struct ttm_tt;
@@ -137,12 +142,9 @@ struct ttm_tt;
* struct ttm_buffer_object
*
* @bdev: Pointer to the buffer object device structure.
- * @buffer_start: The virtual user-space start address of ttm_bo_type_user
- * buffers.
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
- * @addr_space_offset: Address space offset.
* @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is put on the delayed delete list.
@@ -151,7 +153,6 @@ struct ttm_tt;
* Lru lists may keep one refcount, the delayed delete list, and kref != 0
* keeps one refcount. When this refcount reaches zero,
* the object is destroyed.
- * @event_queue: Queue for processes waiting on buffer object status change.
* @mem: structure describing current placement.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
@@ -162,20 +163,13 @@ struct ttm_tt;
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
- * @val_seq: Sequence of the validation holding the @reserved lock.
- * Used to avoid starvation when many processes compete to validate the
- * buffer. This member is protected by the bo_device::lru_lock.
- * @seq_valid: The value of @val_seq is valid. This value is protected by
- * the bo_device::lru_lock.
- * @reserved: Deadlock-free lock used for synchronization state transitions.
- * @sync_obj_arg: Opaque argument to synchronization object function.
* @sync_obj: Pointer to a synchronization object.
* @priv_flags: Flags describing buffer object internal state.
- * @vm_rb: Rb node for the vm rb tree.
- * @vm_node: Address space manager node.
+ * @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
+ * @wu_mutex: Wait unreserved mutex.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -196,11 +190,9 @@ struct ttm_buffer_object {
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
- unsigned long buffer_start;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
unsigned long num_pages;
- uint64_t addr_space_offset;
size_t acc_size;
/**
@@ -209,10 +201,9 @@ struct ttm_buffer_object {
struct kref kref;
struct kref list_kref;
- wait_queue_head_t event_queue;
/**
- * Members protected by the bo::reserved lock.
+ * Members protected by the bo::resv::reserved lock.
*/
struct ttm_mem_reg mem;
@@ -234,15 +225,6 @@ struct ttm_buffer_object {
struct list_head ddestroy;
struct list_head swap;
struct list_head io_reserve_lru;
- uint32_t val_seq;
- bool seq_valid;
-
- /**
- * Members protected by the bdev::lru_lock
- * only when written to.
- */
-
- atomic_t reserved;
/**
* Members protected by struct buffer_object_device::fence_lock
@@ -251,17 +233,10 @@ struct ttm_buffer_object {
* checking NULL while reserved but not holding the mentioned lock.
*/
- void *sync_obj_arg;
void *sync_obj;
unsigned long priv_flags;
- /**
- * Members protected by the bdev::vm_lock
- */
-
- struct rb_node vm_rb;
- struct drm_mm_node *vm_node;
-
+ struct drm_vma_offset_node vma_node;
/**
* Special members that are protected by the reserve lock
@@ -271,6 +246,12 @@ struct ttm_buffer_object {
unsigned long offset;
uint32_t cur_placement;
+
+ struct sg_table *sg;
+
+ struct reservation_object *resv;
+ struct reservation_object ttm_resv;
+ struct mutex wu_mutex;
};
/**
@@ -336,7 +317,6 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
* @bo: The buffer object.
* @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
*
* Changes placement and caching policy of the buffer object
@@ -349,7 +329,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
*/
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait_reserve,
+ bool interruptible,
bool no_wait_gpu);
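/*
 * Editor's sketch: a call site migrating to the new ttm_bo_validate()
 * simply drops the no_wait_reserve argument, since reservation waits
 * are now governed by the reservation object itself. Variable names
 * here are illustrative.
 */
ret = ttm_bo_validate(bo, &placement,
		      true,	/* interruptible */
		      false);	/* no_wait_gpu */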
/**
@@ -423,8 +403,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
* @no_wait: Return immediately if buffer is busy.
*
* Synchronizes a buffer object for CPU RW access. This means
- * blocking command submission that affects the buffer and
- * waiting for buffer idle. This lock is recursive.
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
@@ -466,8 +447,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -499,23 +478,20 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
+ struct sg_table *sg,
void (*destroy) (struct ttm_buffer_object *));
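/*
 * Editor's sketch of the updated ttm_bo_init() call: buffer_start is
 * gone, and a struct sg_table pointer is passed near the end instead
 * (NULL unless the object is ttm_bo_type_sg). All names other than
 * ttm_bo_init() are illustrative.
 */
ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_device, &placement,
		  page_alignment, true /* interruptible */,
		  persistent_swap_storage, acc_size,
		  NULL /* sg, non-NULL only for ttm_bo_type_sg */,
		  destroy_fn);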
/**
- * ttm_bo_synccpu_object_init
+ * ttm_bo_create
*
* @bdev: Pointer to a ttm_bo_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
- * @flags: Initial placement flags.
+ * @placement: Initial placement.
* @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
* @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -538,7 +514,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
- unsigned long buffer_start,
bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo);
@@ -728,5 +703,5 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
size_t count, loff_t *f_pos, bool write);
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
-
+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d43e892307f..a5183da3ef9 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -30,16 +30,17 @@
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_
-#include "ttm/ttm_bo_api.h"
-#include "ttm/ttm_memory.h"
-#include "ttm/ttm_module.h"
-#include "drm_mm.h"
-#include "drm_global.h"
-#include "linux/workqueue.h"
-#include "linux/fs.h"
-#include "linux/spinlock.h"
-
-struct ttm_backend;
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_memory.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_placement.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_global.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/reservation.h>
struct ttm_backend_func {
/**
@@ -81,6 +82,7 @@ struct ttm_backend_func {
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
enum ttm_caching_state {
tt_uncached,
@@ -116,8 +118,8 @@ struct ttm_tt {
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
struct ttm_bo_global *glob;
- struct ttm_backend *be;
struct file *swap_storage;
enum ttm_caching_state caching_state;
enum {
@@ -395,7 +397,7 @@ struct ttm_bo_driver {
*/
int (*move) (struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
@@ -423,10 +425,10 @@ struct ttm_bo_driver {
* documentation.
*/
- bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
- int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+ bool (*sync_obj_signaled) (void *sync_obj);
+ int (*sync_obj_wait) (void *sync_obj,
bool lazy, bool interruptible);
- int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+ int (*sync_obj_flush) (void *sync_obj);
void (*sync_obj_unref) (void **sync_obj);
void *(*sync_obj_ref) (void *sync_obj);
@@ -518,12 +520,10 @@ struct ttm_bo_global {
* @man: An array of mem_type_managers.
* @fence_lock: Protects the synchronizing members on *all* bos belonging
* to this device.
- * @addr_space_mm: Range manager for the device address space.
+ * @vma_manager: Address space manager
* @lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
* @val_seq: Current validation sequence.
- * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
- * If a GPU lockup has been detected, this is forced to 0.
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
@@ -538,14 +538,13 @@ struct ttm_bo_device {
struct list_head device_list;
struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
- rwlock_t vm_lock;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
spinlock_t fence_lock;
+
/*
- * Protected by the vm lock.
+ * Protected by internal locks.
*/
- struct rb_root addr_space_rb;
- struct drm_mm addr_space_mm;
+ struct drm_vma_offset_manager vma_manager;
/*
* Protected by the global:lru lock.
@@ -557,7 +556,6 @@ struct ttm_bo_device {
* Protected by load / firstopen / lastclose / unload sync.
*/
- bool nice_mode;
struct address_space *dev_mapping;
/*
@@ -683,6 +681,15 @@ extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
struct file *persistent_swap_storage);
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
/*
* ttm_bo.c
*/
@@ -707,7 +714,6 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* @proposed_placement: Proposed new placement for the buffer object.
* @mem: A struct ttm_mem_reg.
* @interruptible: Sleep interruptible when sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
*
* Allocate memory space for the buffer object pointed to by @bo, using
@@ -723,27 +729,13 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu);
+ bool no_wait_gpu);
extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-/**
- * ttm_bo_wait_for_cpu
- *
- * @bo: Pointer to a struct ttm_buffer_object.
- * @no_wait: Don't sleep while waiting.
- *
- * Wait until a buffer object is no longer sync'ed for CPU access.
- * Returns:
- * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-
-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-
extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);
@@ -755,6 +747,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
* @bdev: A pointer to a struct ttm_bo_device to initialize.
* @glob: A pointer to an initialized struct ttm_bo_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @mapping: The address space to use for this bo.
* @file_page_offset: Offset into the device address space that is available
* for buffer data. This ensures compatibility with other users of the
* address space.
@@ -766,6 +759,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
+ struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32);
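/*
 * Editor's sketch: the new @mapping parameter is the address_space the
 * driver's buffers are mmapped through, e.g. the DRM device inode's
 * i_mapping. dev_priv, driver_funcs and DRM_FILE_PAGE_OFFSET are
 * placeholders.
 */
ret = ttm_bo_device_init(&dev_priv->bdev, glob, &driver_funcs,
			 dev->anon_inode->i_mapping,
			 DRM_FILE_PAGE_OFFSET, need_dma32);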
/**
@@ -790,6 +784,55 @@ extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * __ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_ticket: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @ticket->stamp is older.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if @use_ticket == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @use_ticket is set to true.
+ */
+static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_ticket,
+ struct ww_acquire_ctx *ticket)
+{
+ int ret = 0;
+
+ if (no_wait) {
+ bool success;
+ if (WARN_ON(ticket))
+ return -EBUSY;
+
+ success = ww_mutex_trylock(&bo->resv->lock);
+ return success ? 0 : -EBUSY;
+ }
+
+ if (interruptible)
+ ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
+ else
+ ret = ww_mutex_lock(&bo->resv->lock, ticket);
+ if (ret == -EINTR)
+ return -ERESTARTSYS;
+ return ret;
+}
/**
* ttm_bo_reserve:
@@ -797,8 +840,8 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_sequence: If @bo is already reserved, Only sleep waiting for
- * it to become unreserved if @sequence < (@bo)->sequence.
+ * @use_ticket: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @ticket->stamp is older.
*
* Locks a buffer object for validation. (Or prevents other processes from
* locking it for validation) and removes it from lru lists, while taking
@@ -809,19 +852,10 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
* to make room for a buffer already reserved. (Buffers are reserved before
* they are evicted). The following algorithm prevents such deadlocks from
* occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
* (typically execbuf), should first obtain a unique 32-bit
* validation sequence number,
- * and call this function with @use_sequence == 1 and @sequence == the unique
+ * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
* sequence number. If upon call of this function, the buffer object is already
* reserved, the validation sequence is checked against the validation
* sequence of the process currently reserving the buffer,
@@ -836,90 +870,118 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
* will eventually succeed, preventing both deadlocks and starvation.
*
* Returns:
- * -EAGAIN: The reservation may cause a deadlock.
+ * -EDEADLK: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if @use_ticket == 1).
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
* -EBUSY: The function needed to sleep, but @no_wait was true
- * -EDEADLK: Bo already reserved using @sequence. This error code will only
- * be returned if @use_sequence is set to true.
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @use_ticket is set to true.
*/
-extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_sequence, uint32_t sequence);
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_ticket,
+ struct ww_acquire_ctx *ticket)
+{
+ int ret;
+
+ WARN_ON(!atomic_read(&bo->kref.refcount));
+ ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
+ if (likely(ret == 0))
+ ttm_bo_del_sub_from_lru(bo);
+
+ return ret;
+}
/**
- * ttm_bo_reserve_locked:
- *
+ * ttm_bo_reserve_slowpath:
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_sequence: If @bo is already reserved, Only sleep waiting for
- * it to become unreserved if @sequence < (@bo)->sequence.
- *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
- * Otherwise identical to ttm_bo_reserve.
+ * @ticket: The ww_acquire_ctx used for the reservation.
*
- * Returns:
- * -EAGAIN: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EDEADLK: Bo already reserved using @sequence. This error code will only
- * be returned if @use_sequence is set to true.
+ * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
*/
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_sequence,
- uint32_t sequence);
+static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible,
+ struct ww_acquire_ctx *ticket)
+{
+ int ret = 0;
+
+ WARN_ON(!atomic_read(&bo->kref.refcount));
+
+ if (interruptible)
+ ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+ ticket);
+ else
+ ww_mutex_lock_slow(&bo->resv->lock, ticket);
+
+ if (likely(ret == 0))
+ ttm_bo_del_sub_from_lru(bo);
+ else if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+
+ return ret;
+}
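/*
 * Editor's sketch of the wound/wait backoff cycle these helpers enable:
 * on -EDEADLK the caller releases every reservation it already holds,
 * takes the contended buffer on the slowpath, then retries the rest.
 * Everything except the ttm_bo_* calls is illustrative.
 */
	ret = ttm_bo_reserve(bo, true, false, true, &ticket);
	if (ret == -EDEADLK) {
		unreserve_all_held_buffers(&ticket);	/* placeholder */
		ret = ttm_bo_reserve_slowpath(bo, true, &ticket);
		/* now re-reserve the remaining buffers and retry */
	}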
/**
- * ttm_bo_unreserve
- *
+ * __ttm_bo_unreserve
* @bo: A pointer to a struct ttm_buffer_object.
*
- * Unreserve a previous reservation of @bo.
+ * Unreserve a previous reservation of @bo where the buffer object is
+ * already on lru lists.
*/
-extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
+static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ ww_mutex_unlock(&bo->resv->lock);
+}
/**
- * ttm_bo_unreserve_locked
+ * ttm_bo_unreserve
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Unreserve a previous reservation of @bo.
- * Needs to be called with struct ttm_bo_global::lru_lock held.
*/
-extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+ spin_lock(&bo->glob->lru_lock);
+ ttm_bo_add_to_lru(bo);
+ spin_unlock(&bo->glob->lru_lock);
+ }
+ __ttm_bo_unreserve(bo);
+}
/**
- * ttm_bo_wait_unreserved
- *
+ * ttm_bo_unreserve_ticket
* @bo: A pointer to a struct ttm_buffer_object.
+ * @ticket: ww_acquire_ctx used for reserving
*
- * Wait for a struct ttm_buffer_object to become unreserved.
- * This is typically used in the execbuf code to relax cpu-usage when
- * a potential deadlock condition backoff.
+ * Unreserve a previous reservation of @bo made with @ticket.
*/
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
- bool interruptible);
+static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
+ struct ww_acquire_ctx *t)
+{
+ ttm_bo_unreserve(bo);
+}
/*
* ttm_bo_util.c
*/
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/**
* ttm_bo_move_ttm
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -934,15 +996,14 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* ttm_bo_move_memcpy
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -957,8 +1018,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_reserve,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* ttm_bo_free_old_node
@@ -974,10 +1035,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*
* @bo: A pointer to a struct ttm_buffer_object.
* @sync_obj: A sync object that signals when moving is complete.
- * @sync_obj_arg: An argument to pass to the sync object idle / wait
- * functions.
* @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -991,9 +1049,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
- void *sync_obj_arg,
- bool evict, bool no_wait_reserve,
- bool no_wait_gpu,
+ bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
* ttm_io_prot
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 26cc7f9ffa4..16db7d01a33 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -31,7 +31,7 @@
#ifndef _TTM_EXECBUF_UTIL_H_
#define _TTM_EXECBUF_UTIL_H_
-#include "ttm/ttm_bo_api.h"
+#include <ttm/ttm_bo_api.h>
#include <linux/list.h>
/**
@@ -39,8 +39,6 @@
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
- * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
- * adding a new sync object.
* @reserved: Indicates whether @bo has been reserved for validation.
* @removed: Indicates whether @bo has been removed from lru lists.
* @put_count: Number of outstanding references on bo::list_kref.
@@ -50,7 +48,6 @@
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
- void *new_sync_obj_arg;
bool reserved;
bool removed;
int put_count;
@@ -60,17 +57,21 @@ struct ttm_validate_buffer {
/**
* function ttm_eu_backoff_reservation
*
+ * @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
*
* Undoes all buffer validation reservations for bos pointed to by
* the list entries.
*/
-extern void ttm_eu_backoff_reservation(struct list_head *list);
+extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct list_head *list);
/**
* function ttm_eu_reserve_buffers
*
+ * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
+ * non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
*
* Tries to reserve bos pointed to by the list entries for validation.
@@ -93,11 +94,13 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
* has failed.
*/
-extern int ttm_eu_reserve_buffers(struct list_head *list);
+extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+ struct list_head *list);
/**
* function ttm_eu_fence_buffer_objects.
*
+ * @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
* @sync_obj: The new sync object for the buffers.
*
@@ -107,6 +110,7 @@ extern int ttm_eu_reserve_buffers(struct list_head *list);
*
*/
-extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+ struct list_head *list, void *sync_obj);
#endif
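/*
 * Editor's sketch of the reworked execbuf flow, with one ww_acquire_ctx
 * ticket threaded through reserve, backoff and fence. The validation
 * step in the middle is driver-specific and only indicated here.
 */
struct ww_acquire_ctx ticket;
int ret;

ret = ttm_eu_reserve_buffers(&ticket, &val_list);
if (ret)
	return ret;

ret = driver_validate_buffers(&val_list);	/* placeholder */
if (ret) {
	ttm_eu_backoff_reservation(&ticket, &val_list);
	return ret;
}
ttm_eu_fence_buffer_objects(&ticket, &val_list, sync_obj);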
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
index 2e7f0c941b5..2902beb5f68 100644
--- a/include/drm/ttm/ttm_lock.h
+++ b/include/drm/ttm/ttm_lock.h
@@ -49,7 +49,7 @@
#ifndef _TTM_LOCK_H_
#define _TTM_LOCK_H_
-#include "ttm/ttm_object.h"
+#include <ttm/ttm_object.h>
#include <linux/wait.h>
#include <linux/atomic.h>
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 26c1f78d136..72dcbe81dd0 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -30,6 +30,7 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
+#include <linux/bug.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kobject.h>
@@ -59,7 +60,6 @@ struct ttm_mem_shrink {
* for the GPU, and this will otherwise block other workqueue tasks(?)
* At this point we use only a single-threaded workqueue.
* @work: The workqueue callback for the shrink queue.
- * @queue: Wait queue for processes suspended waiting for memory.
* @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
* @zones: Array of pointers to accounting zones.
@@ -79,7 +79,6 @@ struct ttm_mem_global {
struct ttm_mem_shrink *shrink;
struct workqueue_struct *swap_queue;
struct work_struct work;
- wait_queue_head_t queue;
spinlock_t lock;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index e46054e5255..ed953f98f0e 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -38,8 +38,10 @@
#define _TTM_OBJECT_H_
#include <linux/list.h>
-#include "drm_hashtab.h"
+#include <drm/drm_hashtab.h>
#include <linux/kref.h>
+#include <linux/rcupdate.h>
+#include <linux/dma-buf.h>
#include <ttm/ttm_memory.h>
/**
@@ -76,6 +78,7 @@ enum ttm_object_type {
ttm_fence_type,
ttm_buffer_type,
ttm_lock_type,
+ ttm_prime_type,
ttm_driver_type0 = 256,
ttm_driver_type1,
ttm_driver_type2,
@@ -120,6 +123,7 @@ struct ttm_object_device;
*/
struct ttm_base_object {
+ struct rcu_head rhead;
struct drm_hash_item hash;
enum ttm_object_type object_type;
bool shareable;
@@ -130,6 +134,30 @@ struct ttm_base_object {
enum ttm_ref_type ref_type);
};
+
+/**
+ * struct ttm_prime_object - Modified base object that is prime-aware
+ *
+ * @base: struct ttm_base_object that we derive from
+ * @mutex: Mutex protecting the @dma_buf member.
+ * @size: Size of the dma_buf associated with this object
+ * @real_type: Type of the underlying object. Needed since we're setting
+ * the value of @base::object_type to ttm_prime_type
+ * @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
+ * object.
+ * @refcount_release: The underlying object's release method. Needed since
+ * we set @base::refcount_release to our own release method.
+ */
+
+struct ttm_prime_object {
+ struct ttm_base_object base;
+ struct mutex mutex;
+ size_t size;
+ enum ttm_object_type real_type;
+ struct dma_buf *dma_buf;
+ void (*refcount_release) (struct ttm_base_object **);
+};
+
/**
* ttm_base_object_init
*
@@ -162,14 +190,26 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
* @key: Hash key
*
* Looks up a struct ttm_base_object with the key @key.
- * Also verifies that the object is visible to the application, by
- * comparing the @tfile argument and checking the object shareable flag.
*/
extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
*tfile, uint32_t key);
/**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
+/**
* ttm_base_object_unref
*
* @p_base: Pointer to a pointer referencing a struct ttm_base_object.
@@ -190,6 +230,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
* @existed: Upon completion, indicates that an identical reference object
* already existed, and the refcount was upped on that object instead.
*
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
* Adding a ref object to a base object is basically like referencing the
* base object, but a user-space application holds the reference. When the
* file corresponding to @tfile is closed, all its reference objects are
@@ -202,6 +244,10 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
enum ttm_ref_type ref_type, bool *existed);
+
+extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+ struct ttm_base_object *base);
+
/**
* ttm_ref_object_base_unref
*
@@ -246,14 +292,18 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
* ttm_object_device_init - initialize a struct ttm_object_device
*
+ * @mem_glob: struct ttm_mem_global for memory accounting.
* @hash_order: Order of hash table used to hash the base objects.
+ * @ops: DMA buf ops for prime objects of this device.
*
* This function is typically called on device initialization to prepare
* data structures needed for ttm base and ref objects.
*/
-extern struct ttm_object_device *ttm_object_device_init
- (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops);
/**
* ttm_object_device_release - release data held by a ttm_object_device
@@ -268,4 +318,33 @@ extern struct ttm_object_device *ttm_object_device_init
extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+#define ttm_base_object_kfree(__object, __base)\
+ kfree_rcu(__object, __base.rhead)
+
+extern int ttm_prime_object_init(struct ttm_object_file *tfile,
+ size_t size,
+ struct ttm_prime_object *prime,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release)
+ (struct ttm_base_object **),
+ void (*ref_obj_release)
+ (struct ttm_base_object *,
+ enum ttm_ref_type ref_type));
+
+static inline enum ttm_object_type
+ttm_base_object_type(struct ttm_base_object *base)
+{
+ return (base->object_type == ttm_prime_type) ?
+ container_of(base, struct ttm_prime_object, base)->real_type :
+ base->object_type;
+}
+extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle);
+extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
+#define ttm_prime_object_kfree(__obj, __prime) \
+ kfree_rcu(__obj, __prime.base.rhead)
#endif
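/*
 * Editor's sketch of the new PRIME helpers as seen from a driver's
 * ioctl paths; dmabuf_fd, handle and prime_fd are illustrative locals.
 */
u32 handle;
int prime_fd;

ret = ttm_prime_fd_to_handle(tfile, dmabuf_fd, &handle);	/* import */
if (ret == 0)
	ret = ttm_prime_handle_to_fd(tfile, handle, DRM_CLOEXEC,
				     &prime_fd);		/* export */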
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 5fe27400d17..49a828425fa 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -26,8 +26,10 @@
#ifndef TTM_PAGE_ALLOC
#define TTM_PAGE_ALLOC
-#include "ttm_bo_driver.h"
-#include "ttm_memory.h"
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_memory.h>
+
+struct device;
/**
* Initialize pool allocator.
@@ -62,7 +64,7 @@ extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-#ifdef CONFIG_SWIOTLB
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
/**
* Initialize pool allocator.
*/
@@ -94,6 +96,15 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
return 0;
}
+static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+ struct device *dev)
+{
+ return -ENOMEM;
+}
+static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+ struct device *dev)
+{
+}
#endif
#endif
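/*
 * Editor's sketch of why the stubs above matter: a driver's populate
 * hook can try the DMA pool when SWIOTLB is active and fall back to
 * the generic pool otherwise, with no extra #ifdefs at the call site.
 * driver_dev is a placeholder for the GPU's struct device.
 */
static int driver_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);

	if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
		return ttm_dma_populate(dma_tt, driver_dev);

	return ttm_pool_populate(ttm);
}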
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index c84ff153a56..8ed44f9bbdf 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -65,6 +65,8 @@
* reference the buffer.
* TTM_PL_FLAG_NO_EVICT means that the buffer may never
* be evicted to make room for other buffers.
+ * TTM_PL_FLAG_TOPDOWN requests to be placed from the
+ * top of the memory area, instead of the bottom.
*/
#define TTM_PL_FLAG_CACHED (1 << 16)
@@ -72,6 +74,7 @@
#define TTM_PL_FLAG_WC (1 << 18)
#define TTM_PL_FLAG_SHARED (1 << 20)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
+#define TTM_PL_FLAG_TOPDOWN (1 << 22)
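/*
 * Editor's sketch: a placement requesting write-combined VRAM carved
 * from the top of the aperture would combine the new flag as, e.g.:
 */
uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_TOPDOWN;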
#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
TTM_PL_FLAG_UNCACHED | \