path: root/drivers/gpu/drm/i915/i915_gem_execbuffer.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-15 14:19:54 +0900
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-15 14:19:54 +0900
commit	049ffa8ab33a63b3bff672d1a0ee6a35ad253fe8 (patch)
tree	70f4c684818b1c9871fa800088427e40d260592e /drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent	c681427e5ca22925fcc1be76a2e260a11e0a8498 (diff)
parent	0846c728e20a0cd1e43fb75a3015f3b176a26466 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is a combo of -next and some -fixes that came in in the
  intervening time.

  Highlights:

  New drivers:
     ARM Armada driver for Marvell Armada 510 SOCs

  Intel:
     Broadwell initial support under a default off switch
     Stereo/3D HDMI mode support
     Valleyview improvements
     Displayport improvements
     Haswell fixes
     initial mipi dsi panel support
     CRC support for debugging
     build with CONFIG_FB=n

  Radeon:
     enable DPM on a number of GPUs by default
     secondary GPU powerdown support
     enable HDMI audio by default
     Hawaii support

  Nouveau:
     dynamic pm code infrastructure reworked, does nothing major yet
     GK208 modesetting support
     MSI fixes, on by default again
     PMPEG improvements
     pageflipping fixes

  GMA500:
     minnowboard SDVO support

  VMware:
     misc fixes

  MSM:
     prime, plane and rendernodes support

  Tegra:
     rearchitected to put the drm driver into the drm subsystem.
     HDMI and gr2d support for tegra 114 SoC

  QXL:
     oops fix, and multi-head fixes

  DRM core:
     sysfs lifetime fixes
     client capability ioctl
     further cleanups to device midlayer
     more vblank timestamp fixes"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (789 commits)
  drm/nouveau: do not map evicted vram buffers in nouveau_bo_vma_add
  drm/nvc0-/gr: shift wrapping bug in nvc0_grctx_generate_r406800
  drm/nouveau/pwr: fix missing mutex unlock in a failure path
  drm/nv40/therm: fix slowing down fan when pstate undefined
  drm/nv11-: synchronise flips to vblank, unless async flip requested
  drm/nvc0-: remove nasty fifo swmthd hack for flip completion method
  drm/nv10-: we no longer need to create nvsw object on user channels
  drm/nouveau: always queue flips relative to kernel channel activity
  drm/nouveau: there is no need to reserve/fence the new fb when flipping
  drm/nouveau: when bailing out of a pushbuf ioctl, do not remove previous fence
  drm/nouveau: allow nouveau_fence_ref() to be a noop
  drm/nvc8/mc: msi rearm is via the nvc0 method
  drm/ttm: Fix vma page_prot bit manipulation
  drm/vmwgfx: Fix a couple of compile / sparse warnings and errors
  drm/vmwgfx: Resource evict fixes
  drm/edid: compare actual vrefresh for all modes for quirks
  drm: shmob_drm: Convert to clk_prepare/unprepare
  drm/nouveau: fix 32-bit build
  drm/i915/opregion: fix build error on CONFIG_ACPI=n
  Revert "drm/radeon/audio: don't set speaker allocation on DCE4+"
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	401
1 file changed, 243 insertions(+), 158 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf345777ae9..885d595e0e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,35 +33,35 @@
#include "intel_drv.h"
#include <linux/dma_remapping.h>
-struct eb_objects {
- struct list_head objects;
+struct eb_vmas {
+ struct list_head vmas;
int and;
union {
- struct drm_i915_gem_object *lut[0];
+ struct i915_vma *lut[0];
struct hlist_head buckets[0];
};
};
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
{
- struct eb_objects *eb = NULL;
+ struct eb_vmas *eb = NULL;
if (args->flags & I915_EXEC_HANDLE_LUT) {
- int size = args->buffer_count;
- size *= sizeof(struct drm_i915_gem_object *);
- size += sizeof(struct eb_objects);
+ unsigned size = args->buffer_count;
+ size *= sizeof(struct i915_vma *);
+ size += sizeof(struct eb_vmas);
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
}
if (eb == NULL) {
- int size = args->buffer_count;
- int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ unsigned size = args->buffer_count;
+ unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
- sizeof(struct eb_objects),
+ sizeof(struct eb_vmas),
GFP_TEMPORARY);
if (eb == NULL)
return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
} else
eb->and = -args->buffer_count;
- INIT_LIST_HEAD(&eb->objects);
+ INIT_LIST_HEAD(&eb->vmas);
return eb;
}
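
eb_create() now tracks vmas rather than objects, but keeps the same two lookup strategies: when userspace sets I915_EXEC_HANDLE_LUT it promises that handles are dense indices, so a flat array of i915_vma pointers suffices; otherwise handles are hashed, with the bucket count halved down from PAGE_SIZE / sizeof(struct hlist_head) / 2 until it is no more than twice the buffer count. A minimal userspace model of that sizing rule, with illustrative constants standing in for the kernel's PAGE_SIZE and sizeof(struct hlist_head):

	#include <stdio.h>

	#define MODEL_PAGE_SIZE   4096u
	#define MODEL_HLIST_SIZE  8u	/* sizeof(struct hlist_head), 64-bit */

	/* Halve the bucket count until it is at most 2 * buffer_count,
	 * mirroring the while loop in eb_create(). */
	static unsigned bucket_count(unsigned buffer_count)
	{
		unsigned count = MODEL_PAGE_SIZE / MODEL_HLIST_SIZE / 2;

		while (count > 2 * buffer_count)
			count >>= 1;
		return count;
	}

	int main(void)
	{
		printf("%u\n", bucket_count(16));	/* prints 32 */
		return 0;
	}
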
static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static int
-eb_lookup_objects(struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- const struct drm_i915_gem_execbuffer2 *args,
- struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ const struct drm_i915_gem_execbuffer2 *args,
+ struct i915_address_space *vm,
+ struct drm_file *file)
{
- int i;
+ struct drm_i915_gem_object *obj;
+ struct list_head objects;
+ int i, ret = 0;
+ INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
+ /* Grab a reference to the object and release the lock so we can lookup
+ * or create the VMA without using GFP_ATOMIC */
for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj;
-
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
- return -ENOENT;
+ ret = -ENOENT;
+ goto out;
}
- if (!list_empty(&obj->exec_list)) {
+ if (!list_empty(&obj->obj_exec_link)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
drm_gem_object_reference(&obj->base);
- list_add_tail(&obj->exec_list, &eb->objects);
+ list_add_tail(&obj->obj_exec_link, &objects);
+ }
+ spin_unlock(&file->table_lock);
- obj->exec_entry = &exec[i];
+ i = 0;
+ list_for_each_entry(obj, &objects, obj_exec_link) {
+ struct i915_vma *vma;
+
+ /*
+ * NOTE: We can leak any vmas created here when something fails
+ * later on. But that's no issue since vma_unbind can deal with
+ * vmas which are not actually bound. And since only
+ * lookup_or_create exists as an interface to get at the vma
+ * from the (obj, vm) we don't run the risk of creating
+ * duplicated vmas for the same vm.
+ */
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG("Failed to lookup VMA\n");
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ list_add_tail(&vma->exec_list, &eb->vmas);
+
+ vma->exec_entry = &exec[i];
if (eb->and < 0) {
- eb->lut[i] = obj;
+ eb->lut[i] = vma;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
- obj->exec_handle = handle;
- hlist_add_head(&obj->exec_node,
+ vma->exec_handle = handle;
+ hlist_add_head(&vma->exec_node,
&eb->buckets[handle & eb->and]);
}
+ ++i;
}
- spin_unlock(&file->table_lock);
- return 0;
+
+out:
+ while (!list_empty(&objects)) {
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+ list_del_init(&obj->obj_exec_link);
+ if (ret)
+ drm_gem_object_unreference(&obj->base);
+ }
+ return ret;
}
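
The reworked lookup is a two-phase scheme: all object references are taken under file->table_lock, then the lock is dropped before i915_gem_obj_lookup_or_create_vma() runs, because creating a VMA may allocate and hence sleep, which the spinlock (or GFP_ATOMIC) would forbid. A userspace sketch of the pattern, with all names as illustrative stand-ins:

	#include <pthread.h>
	#include <stdio.h>

	struct object_model { int refcount; };

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Phase one: pin down the objects under the table lock, as
	 * drm_gem_object_reference() does in eb_lookup_vmas(). */
	static void grab_references(struct object_model *objs, int n)
	{
		pthread_mutex_lock(&table_lock);
		for (int i = 0; i < n; i++)
			objs[i].refcount++;
		pthread_mutex_unlock(&table_lock);
	}

	/* Phase two, outside the lock: allocating work is now safe. */
	static void create_vmas(struct object_model *objs, int n)
	{
		for (int i = 0; i < n; i++)
			printf("vma for object %d (refs=%d)\n", i, objs[i].refcount);
	}

	int main(void)
	{
		struct object_model objs[3] = { {1}, {1}, {1} };

		grab_references(objs, 3);
		create_vmas(objs, 3);
		return 0;
	}
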
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
if (eb->and < 0) {
if (handle >= -eb->and)
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
- obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
- if (obj->exec_handle == handle)
- return obj;
+ vma = hlist_entry(node, struct i915_vma, exec_node);
+ if (vma->exec_handle == handle)
+ return vma;
}
return NULL;
}
}
-static void
-eb_destroy(struct eb_objects *eb)
-{
- while (!list_empty(&eb->objects)) {
- struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb) {
+ while (!list_empty(&eb->vmas)) {
+ struct i915_vma *vma;
- obj = list_first_entry(&eb->objects,
- struct drm_i915_gem_object,
+ vma = list_first_entry(&eb->vmas,
+ struct i915_vma,
exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
+ list_del_init(&vma->exec_list);
+ drm_gem_object_unreference(&vma->obj->base);
}
kfree(eb);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
- return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ return (HAS_LLC(obj->base.dev) ||
+ obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
!obj->map_and_fenceable ||
obj->cache_level != I915_CACHE_NONE);
}
@@ -175,17 +212,31 @@ static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
struct drm_i915_gem_relocation_entry *reloc)
{
+ struct drm_device *dev = obj->base.dev;
uint32_t page_offset = offset_in_page(reloc->offset);
char *vaddr;
int ret = -EINVAL;
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
return ret;
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+ if (page_offset == 0) {
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+ }
+
+ *(uint32_t *)(vaddr + page_offset) = 0;
+ }
+
kunmap_atomic(vaddr);
return 0;
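
Broadwell (gen >= 8) relocations are 64 bits wide, so after writing the low dword the code writes a zero upper dword, and that second write may begin on the following page; relocate_entry_cpu() detects this when offset_in_page() wraps to zero and maps the next page. A small model of just that page-straddle arithmetic (MODEL_PAGE_SIZE is an illustrative stand-in):

	#include <stdint.h>
	#include <stdio.h>

	#define MODEL_PAGE_SIZE 4096u
	#define offset_in_page(o) ((uint32_t)((o) & (MODEL_PAGE_SIZE - 1)))

	int main(void)
	{
		/* Relocation in the last dword of a page ... */
		uint64_t reloc_offset = MODEL_PAGE_SIZE - sizeof(uint32_t);
		uint32_t page_offset = offset_in_page(reloc_offset);

		/* ... so the upper dword wraps to offset 0 of the next page. */
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
		if (page_offset == 0)
			printf("upper dword starts on the next page\n");
		return 0;
	}
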
@@ -216,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
reloc_entry = (uint32_t __iomem *)
(reloc_page + offset_in_page(reloc->offset));
iowrite32(reloc->delta, reloc_entry);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ reloc_entry += 1;
+
+ if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+ io_mapping_unmap_atomic(reloc_page);
+ reloc_page = io_mapping_map_atomic_wc(
+ dev_priv->gtt.mappable,
+ reloc->offset + sizeof(uint32_t));
+ reloc_entry = reloc_page;
+ }
+
+ iowrite32(0, reloc_entry);
+ }
+
io_mapping_unmap_atomic(reloc_page);
return 0;
@@ -223,22 +289,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
+ struct eb_vmas *eb,
struct drm_i915_gem_relocation_entry *reloc,
struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
+ struct i915_vma *target_vma;
uint32_t target_offset;
int ret = -EINVAL;
/* we've already hold a reference to all valid objects */
- target_obj = &eb_get_object(eb, reloc->target_handle)->base;
- if (unlikely(target_obj == NULL))
+ target_vma = eb_get_vma(eb, reloc->target_handle);
+ if (unlikely(target_vma == NULL))
return -ENOENT;
+ target_i915_obj = target_vma->obj;
+ target_obj = &target_vma->obj->base;
- target_i915_obj = to_intel_bo(target_obj);
target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -284,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return 0;
/* Check that the relocation address is valid... */
- if (unlikely(reloc->offset > obj->base.size - 4)) {
+ if (unlikely(reloc->offset >
+ obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
@@ -320,14 +389,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
- struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+ struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +414,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
- vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+ vma->vm);
if (ret)
return ret;
@@ -368,17 +436,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
- struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *relocs,
- struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+ struct eb_vmas *eb,
+ struct drm_i915_gem_relocation_entry *relocs)
{
- const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
- vm);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+ vma->vm);
if (ret)
return ret;
}
@@ -387,10 +454,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
struct i915_address_space *vm)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +468,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
* lockdep complains vehemently.
*/
pagefault_disable();
- list_for_each_entry(obj, &eb->objects, exec_list) {
- ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ ret = i915_gem_execbuffer_relocate_vma(vma, eb);
if (ret)
break;
}
@@ -415,31 +482,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
{
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- return entry->relocation_count && !use_cpu_reloc(obj);
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+ i915_is_ggtt(vma->vm);
}
static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- struct i915_address_space *vm,
- bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+ struct intel_ring_buffer *ring,
+ bool *need_reloc)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence, need_mappable;
+ struct drm_i915_gem_object *obj = vma->obj;
int ret;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
- ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+ ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
false);
if (ret)
return ret;
@@ -467,8 +535,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != i915_gem_obj_offset(obj, vm)) {
- entry->offset = i915_gem_obj_offset(obj, vm);
+ if (entry->offset != vma->node.start) {
+ entry->offset = vma->node.start;
*need_reloc = true;
}
@@ -485,14 +553,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
+ struct drm_i915_gem_object *obj = vma->obj;
- if (!i915_gem_obj_bound_any(obj))
+ if (!drm_mm_node_allocated(&vma->node))
return;
- entry = obj->exec_entry;
+ entry = vma->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
i915_gem_object_unpin_fence(obj);
@@ -505,41 +574,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
- struct list_head *objects,
- struct i915_address_space *vm,
+ struct list_head *vmas,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
- struct list_head ordered_objects;
+ struct i915_vma *vma;
+ struct i915_address_space *vm;
+ struct list_head ordered_vmas;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
- INIT_LIST_HEAD(&ordered_objects);
- while (!list_empty(objects)) {
+ if (list_empty(vmas))
+ return 0;
+
+ vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+ INIT_LIST_HEAD(&ordered_vmas);
+ while (!list_empty(vmas)) {
struct drm_i915_gem_exec_object2 *entry;
bool need_fence, need_mappable;
- obj = list_first_entry(objects,
- struct drm_i915_gem_object,
- exec_list);
- entry = obj->exec_entry;
+ vma = list_first_entry(vmas, struct i915_vma, exec_list);
+ obj = vma->obj;
+ entry = vma->exec_entry;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
if (need_mappable)
- list_move(&obj->exec_list, &ordered_objects);
+ list_move(&vma->exec_list, &ordered_vmas);
else
- list_move_tail(&obj->exec_list, &ordered_objects);
+ list_move_tail(&vma->exec_list, &ordered_vmas);
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
- list_splice(&ordered_objects, objects);
+ list_splice(&ordered_vmas, vmas);
/* Attempt to pin all of the buffers into the GTT.
* This is done in 3 phases:
@@ -558,52 +632,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
int ret = 0;
/* Unbind any ill-fitting objects or pin. */
- list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool need_fence, need_mappable;
- u32 obj_offset;
- if (!i915_gem_obj_bound(obj, vm))
+ obj = vma->obj;
+
+ if (!drm_mm_node_allocated(&vma->node))
continue;
- obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable = need_fence || need_reloc_mappable(obj);
+ need_mappable = need_fence || need_reloc_mappable(vma);
WARN_ON((need_mappable || need_fence) &&
- !i915_is_ggtt(vm));
+ !i915_is_ggtt(vma->vm));
if ((entry->alignment &&
- obj_offset & (entry->alignment - 1)) ||
+ vma->node.start & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
- ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+ ret = i915_vma_unbind(vma);
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
- list_for_each_entry(obj, objects, exec_list) {
- if (i915_gem_obj_bound(obj, vm))
+ list_for_each_entry(vma, vmas, exec_list) {
+ if (drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
if (ret)
goto err;
}
err: /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list)
- i915_gem_execbuffer_unreserve_object(obj);
+ list_for_each_entry(vma, vmas, exec_list)
+ i915_gem_execbuffer_unreserve_vma(vma);
if (ret != -ENOSPC || retry++)
return ret;
- ret = i915_gem_evict_everything(ring->dev);
+ ret = i915_gem_evict_vm(vm, true);
if (ret)
return ret;
} while (1);
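
The reserve loop now recovers from address-space exhaustion per VM: every vma is pinned in the ordered list, and on the first -ENOSPC the whole target VM is emptied with the new i915_gem_evict_vm() before exactly one retry; a second -ENOSPC is fatal. A compact userspace model of that control flow, with hypothetical stand-ins for the pin and evict steps:

	#include <errno.h>
	#include <stdio.h>

	/* First attempt fails with -ENOSPC, the retry succeeds. */
	static int try_pin_all(int attempt) { return attempt ? 0 : -ENOSPC; }
	static int evict_vm(void) { return 0; }

	/* Mirrors the do/while in i915_gem_execbuffer_reserve(): any error
	 * other than -ENOSPC, or a second -ENOSPC, ends the loop. */
	static int reserve_all(void)
	{
		int retry = 0;

		do {
			int ret = try_pin_all(retry);
			if (ret != -ENOSPC || retry++)
				return ret;
			ret = evict_vm();
			if (ret)
				return ret;
		} while (1);
	}

	int main(void)
	{
		printf("reserve_all() = %d\n", reserve_all());	/* 0 */
		return 0;
	}
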
@@ -614,24 +688,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
- struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec,
- struct i915_address_space *vm)
+ struct eb_vmas *eb,
+ struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
- struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
bool need_relocs;
int *reloc_offset;
int i, total, ret;
- int count = args->buffer_count;
+ unsigned count = args->buffer_count;
+
+ if (WARN_ON(list_empty(&eb->vmas)))
+ return 0;
+
+ vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
/* We may process another execbuffer during the unlock... */
- while (!list_empty(&eb->objects)) {
- obj = list_first_entry(&eb->objects,
- struct drm_i915_gem_object,
- exec_list);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
+ while (!list_empty(&eb->vmas)) {
+ vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+ list_del_init(&vma->exec_list);
+ drm_gem_object_unreference(&vma->obj->base);
}
mutex_unlock(&dev->struct_mutex);
@@ -695,20 +772,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
- ret = eb_lookup_objects(eb, exec, args, file);
+ ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
- list_for_each_entry(obj, &eb->objects, exec_list) {
- int offset = obj->exec_entry - exec;
- ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
- reloc + reloc_offset[offset],
- vm);
+ list_for_each_entry(vma, &eb->vmas, exec_list) {
+ int offset = vma->exec_entry - exec;
+ ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+ reloc + reloc_offset[offset]);
if (ret)
goto err;
}
@@ -727,14 +803,15 @@ err:
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
- struct list_head *objects)
+ struct list_head *vmas)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
@@ -771,8 +848,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
- int relocs_total = 0;
- int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+ unsigned relocs_total = 0;
+ unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
for (i = 0; i < count; i++) {
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
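
Switching relocs_total and relocs_max to unsigned closes a signed-overflow hole: per-buffer relocation counts are now summed against UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry), so a huge relocation_count can no longer wrap the running total. A sketch of the guard, using an illustrative 32-byte stand-in for the relocation entry:

	#include <limits.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct reloc_model { uint64_t fields[4]; };	/* 32 bytes, like the uapi struct */

	static bool relocs_fit(const unsigned *counts, int n)
	{
		unsigned relocs_total = 0;
		unsigned relocs_max = UINT_MAX / sizeof(struct reloc_model);

		for (int i = 0; i < n; i++) {
			/* Reject any count that would overflow the total. */
			if (counts[i] > relocs_max - relocs_total)
				return false;
			relocs_total += counts[i];
		}
		return true;
	}

	int main(void)
	{
		unsigned ok[] = { 10, 20 }, bad[] = { UINT_MAX / 8 };

		printf("%d %d\n", relocs_fit(ok, 2), relocs_fit(bad, 1));
		return 0;
	}
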
@@ -809,13 +886,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
}
static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
- struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
- list_for_each_entry(obj, objects, exec_list) {
+ list_for_each_entry(vma, vmas, exec_list) {
+ struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
@@ -825,9 +902,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- /* FIXME: This lookup gets fixed later <-- danvet */
- list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
- i915_gem_object_move_to_active(obj, ring);
+ i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +960,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct eb_objects *eb;
+ struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
+ struct i915_ctx_hang_stats *hs;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask, flags;
@@ -1000,7 +1076,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+ cliprects = kcalloc(args->num_cliprects,
+ sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
@@ -1025,7 +1102,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- eb = eb_create(args);
+ eb = eb_create(args, vm);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@@ -1033,18 +1110,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
- ret = eb_lookup_objects(eb, exec, args, file);
+ ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = list_entry(eb->objects.prev,
- struct drm_i915_gem_object,
- exec_list);
+ batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
if (ret)
goto err;
@@ -1054,7 +1129,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec, vm);
+ eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1071,15 +1146,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
- * hsw should have this fixed, but let's be paranoid and do it
- * unconditionally for now. */
+ * hsw should have this fixed, but bdw mucks it up again. */
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
if (ret)
goto err;
+ hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+ if (IS_ERR(hs)) {
+ ret = PTR_ERR(hs);
+ goto err;
+ }
+
+ if (hs->banned) {
+ ret = -EIO;
+ goto err;
+ }
+
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
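
The hunk above also wires per-context hang statistics into submission: before switching contexts, i915_gem_context_get_hang_stats() is consulted and a context marked banned has its execbuffer refused with -EIO. A minimal model of that gate (the type is an illustrative stand-in):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct hang_stats_model { bool banned; };

	/* A banned context is refused before any GPU work is queued. */
	static int gate_submission(const struct hang_stats_model *hs)
	{
		return hs->banned ? -EIO : 0;
	}

	int main(void)
	{
		struct hang_stats_model hs = { .banned = true };

		printf("gate_submission() = %d\n", gate_submission(&hs));	/* -5 */
		return 0;
	}
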
@@ -1131,7 +1216,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+ i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err: