Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_tiling.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 331
1 file changed, 190 insertions, 141 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 540dd336e6e..cb150e8b433 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,11 +25,10 @@
*
*/
-#include "linux/string.h"
-#include "linux/bitops.h"
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
/** @file i915_gem_tiling.c
@@ -88,20 +87,47 @@
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (!IS_I9XX(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated with
+ * identically sized DIMMs. We don't need to check the 3rd
+ * channel because no CPU with a GPU attached ships in that
+ * configuration. Also, swizzling only makes sense for 2
+ * channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ } else if (IS_GEN5(dev)) {
+ /* On Ironlake, whatever the DRAM config, the GPU always
+ * does the same swizzling setup.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev)) {
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
- /* On mobile 9xx chipsets, channel interleave by the CPU is
+ /* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
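[Sketch, not part of the patch] The swizzle modes selected above describe how the memory controller folds higher address bits into bit 6 of an address. Written out as plain C with invented helper names, bits 9 and 10 are folded in for X tiling and bit 9 alone for Y tiling:

    #include <stdint.h>

    /* I915_BIT_6_SWIZZLE_9_10: bit 6 is XORed with bits 9 and 10.
     * (The 9_10_17 variant additionally folds in bit 17.) */
    static uint32_t swizzle_bit6_x(uint32_t addr)
    {
            uint32_t bit6 = (addr >> 6) ^ (addr >> 9) ^ (addr >> 10);
            return (addr & ~(1u << 6)) | ((bit6 & 1u) << 6);
    }

    /* I915_BIT_6_SWIZZLE_9: bit 6 is XORed with bit 9 only. */
    static uint32_t swizzle_bit6_y(uint32_t addr)
    {
            uint32_t bit6 = (addr >> 6) ^ (addr >> 9);
            return (addr & ~(1u << 6)) | ((bit6 & 1u) << 6);
    }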
@@ -174,35 +200,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
-
-/**
- * Returns the size of the fence for a tiled object of the given size.
- */
-static int
-i915_get_fence_size(struct drm_device *dev, int size)
-{
- int i;
- int start;
-
- if (IS_I965G(dev)) {
- /* The 965 can have fences at any page boundary. */
- return ALIGN(size, 4096);
- } else {
- /* Align the size to a power of two greater than the smallest
- * fence size.
- */
- if (IS_I9XX(dev))
- start = 1024 * 1024;
- else
- start = 512 * 1024;
-
- for (i = start; i < size; i <<= 1)
- ;
-
- return i;
- }
-}
-
/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -213,53 +210,76 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
- if (!IS_I9XX(dev) ||
+ if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
- if (IS_I965G(dev)) {
- /* i965 stores the end address of the gtt mapping in the fence
- * reg, so dont bother to check the size */
- if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+ /* i965+ stores the end address of the gtt mapping in the fence
+ * reg, so don't bother to check the size */
+ if (INTEL_INFO(dev)->gen >= 7) {
+ if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_I9XX(dev)) {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
- * instead of 4 (2KB) on 945s.
- */
- if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 20))
+ } else if (INTEL_INFO(dev)->gen >= 4) {
+ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 19))
+ if (stride > 8192)
return false;
+
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
}
+ if (stride < tile_width)
+ return false;
+
/* 965+ just needs multiples of tile width */
- if (IS_I965G(dev)) {
+ if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
}
/* Pre-965 needs power of two tile widths */
- if (stride < tile_width)
+ if (stride & (stride - 1))
return false;
- if (stride & (stride - 1))
+ return true;
+}
+
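[Sketch, not part of the patch] The "stride & (stride - 1)" test above is the usual power-of-two check; as standalone C with an invented name:

    #include <stdbool.h>
    #include <stdint.h>

    /* A nonzero value is a power of two iff clearing its lowest set
     * bit leaves zero -- the same test i915_tiling_ok applies to
     * pre-965 strides. */
    static bool stride_is_power_of_two(uint32_t stride)
    {
            return stride != 0 && (stride & (stride - 1)) == 0;
    }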
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+ u32 size;
+
+ if (tiling_mode == I915_TILING_NONE)
+ return true;
+
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
+ return true;
+
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
+ return false;
+ } else {
+ if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
+ return false;
+ }
+
+ size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+ if (i915_gem_obj_ggtt_size(obj) != size)
return false;
- /* We don't handle the aperture area covered by the fence being bigger
- * than the object size.
- */
- if (i915_get_fence_size(dev, size) != size)
+ if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false;
return true;
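[Sketch, not part of the patch] The size comparison above is made against i915_gem_get_gtt_size(); on pre-965 hardware that amounts to rounding the object size up to the next power of two, starting from the platform's minimum fence size, as the removed i915_get_fence_size() helper did. A minimal sketch with an invented name:

    #include <stdint.h>

    /* min_fence is 512KiB on gen2 and 1MiB on gen3, per the removed
     * helper. The resulting region must also be naturally aligned,
     * which is what the offset & (size - 1) test above enforces. */
    static uint32_t fence_size_pre965(uint32_t size, uint32_t min_fence)
    {
            uint32_t s = min_fence;

            while (s < size)
                    s <<= 1;
            return s;
    }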
@@ -271,27 +291,31 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
- obj_priv = obj->driver_private;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
- if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- drm_gem_object_unreference(obj);
+ if (!i915_tiling_ok(dev,
+ args->stride, obj->base.size, args->tiling_mode)) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
+ if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return -EBUSY;
+ }
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
@@ -314,32 +338,73 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
args->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ args->stride = 0;
}
}
- if (args->tiling_mode != obj_priv->tiling_mode) {
- int ret;
- /* Unbind the object, as switching tiling means we're
- * switching the cache organization due to fencing, probably.
+ mutex_lock(&dev->struct_mutex);
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
+ /* We need to rebind the object if its current allocation
+ * no longer meets the alignment restrictions for its new
+ * tiling mode. Otherwise we can just leave it alone, but we
+ * must ensure that any fence register is updated before the
+ * next fenced access (either through the GTT or by the BLT
+ * unit on older GPUs).
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
+ */
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- WARN(ret != -ERESTARTSYS,
- "failed to unbind object for tiling switch");
- args->tiling_mode = obj_priv->tiling_mode;
- mutex_unlock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
-
- return ret;
+
+ obj->map_and_fenceable =
+ !i915_gem_obj_ggtt_bound(obj) ||
+ (i915_gem_obj_ggtt_offset(obj) +
+ obj->base.size <= dev_priv->gtt.mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
+
+ /* Rebind if we need a change of alignment */
+ if (!obj->map_and_fenceable) {
+ u32 unfenced_align =
+ i915_gem_get_gtt_alignment(dev, obj->base.size,
+ args->tiling_mode,
+ false);
+ if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
+ ret = i915_gem_object_ggtt_unbind(obj);
+ }
+
+ if (ret == 0) {
+ obj->fence_dirty =
+ obj->fenced_gpu_access ||
+ obj->fence_reg != I915_FENCE_REG_NONE;
+
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
}
- obj_priv->tiling_mode = args->tiling_mode;
}
- obj_priv->stride = args->stride;
+ /* we have to maintain this existing ABI... */
+ args->stride = obj->stride;
+ args->tiling_mode = obj->tiling_mode;
+
+ /* Try to preallocate memory required to save swizzling on put-pages */
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
+ sizeof(long), GFP_KERNEL);
+ }
+ } else {
+ kfree(obj->bit_17);
+ obj->bit_17 = NULL;
+ }
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
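[Sketch, not part of the patch] A hedged userspace example of exercising this ioctl, assuming an open DRM fd and a GEM handle obtained elsewhere; DRM_IOCTL_I915_GEM_SET_TILING and struct drm_i915_gem_set_tiling come from <drm/i915_drm.h>:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int set_x_tiling(int fd, uint32_t handle, uint32_t stride)
    {
            struct drm_i915_gem_set_tiling arg;
            int ret;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.tiling_mode = I915_TILING_X;
            arg.stride = stride;

            do {
                    ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
            } while (ret == -1 && errno == EINTR);
            if (ret)
                    return -errno;

            /* The kernel may fall back to I915_TILING_NONE (and zero
             * the stride) when the swizzle mode is unknown; callers
             * must honour the values written back, per the ABI note
             * in the patch above. */
            return arg.tiling_mode == I915_TILING_X ? 0 : -EINVAL;
    }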
/**
@@ -347,22 +412,20 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
- obj_priv = obj->driver_private;
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
mutex_lock(&dev->struct_mutex);
- args->tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
+ args->tiling_mode = obj->tiling_mode;
+ switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -382,7 +445,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
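[Sketch, not part of the patch] The matching query side, under the same assumptions and headers as the previous example:

    static int query_tiling(int fd, uint32_t handle,
                            uint32_t *tiling_mode, uint32_t *swizzle_mode)
    {
            struct drm_i915_gem_get_tiling arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &arg))
                    return -errno;

            /* 9_10_17 is reported as 9_10 (see the hunk above): the
             * kernel hides bit 17 from userspace and fixes the pages
             * up itself. */
            *tiling_mode = arg.tiling_mode;
            *swizzle_mode = arg.swizzle_mode;
            return 0;
    }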
@@ -393,16 +456,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static int
+static void
i915_gem_swizzle_page(struct page *page)
{
+ char temp[64];
char *vaddr;
int i;
- char temp[64];
vaddr = kmap(page);
- if (vaddr == NULL)
- return -ENOMEM;
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
@@ -411,65 +472,53 @@ i915_gem_swizzle_page(struct page *page)
}
kunmap(page);
-
- return 0;
}
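[Sketch, not part of the patch] Bit 6 selects which 64-byte half of a 128-byte span an access hits, so when bit 17 of a page's physical address flips under the 9_10_17 swizzle, fixing the data up is exactly a swap of the two halves of each 128-byte chunk. The loop above in standalone form, with an invented name:

    #include <stddef.h>
    #include <string.h>

    static void swizzle_page_buffer(char *vaddr, size_t page_size)
    {
            char temp[64];
            size_t i;

            /* Exchange the two 64-byte halves of every 128-byte chunk. */
            for (i = 0; i + 128 <= page_size; i += 128) {
                    memcpy(temp, &vaddr[i], 64);
                    memcpy(&vaddr[i], &vaddr[i + 64], 64);
                    memcpy(&vaddr[i + 64], temp, 64);
            }
    }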
void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page_count = obj->size >> PAGE_SHIFT;
+ struct sg_page_iter sg_iter;
int i;
- if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+ if (obj->bit_17 == NULL)
return;
- if (obj_priv->bit_17 == NULL)
- return;
-
- for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj_priv->bit_17) != 0)) {
- int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
- if (ret != 0) {
- DRM_ERROR("Failed to swizzle page\n");
- return;
- }
- set_page_dirty(obj_priv->pages[i]);
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
}
+ i++;
}
}
void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page_count = obj->size >> PAGE_SHIFT;
+ struct sg_page_iter sg_iter;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
- if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
- return;
-
- if (obj_priv->bit_17 == NULL) {
- obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
- sizeof(long), GFP_KERNEL);
- if (obj_priv->bit_17 == NULL) {
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+ sizeof(long), GFP_KERNEL);
+ if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
}
}
- for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
- __set_bit(i, obj_priv->bit_17);
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+ if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+ __set_bit(i, obj->bit_17);
else
- __clear_bit(i, obj_priv->bit_17);
+ __clear_bit(i, obj->bit_17);
+ i++;
}
}
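[Sketch, not part of the patch] The bitmap above stores one bit per page, sized in whole longs via BITS_TO_LONGS. A userspace-flavoured sketch of the same sizing, assuming 4KiB pages:

    #include <stdlib.h>

    static unsigned long *alloc_bit17_bitmap(size_t obj_size)
    {
            size_t pages = obj_size / 4096; /* PAGE_SHIFT == 12 assumed */
            size_t bits_per_long = 8 * sizeof(long);
            size_t nlongs = (pages + bits_per_long - 1) / bits_per_long;

            return calloc(nlongs, sizeof(unsigned long));
    }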